| Column | Type | Range |
|---|---|---|
| entry_point | string | 1–65 chars |
| original_triton_code | string | 4.5k–619k chars |
| python_code | string | 208–60.9k chars |
| triton_code | string | 1.15k–275k chars |
| repo_name | string | 7–115 chars |
| module_name | string | 1–65 chars |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | sequence | 1–6 entries |
| stars | int64 | 0–19.8k |
| sha | string | 40 chars |
| repo_link | string | 72–180 chars |
| pytorch_code | string | 200–4.05k chars |
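Each row pairs a PyTorch module (`entry_point`, `pytorch_code`) with the Triton/Inductor code generated for it (`original_triton_code`, `python_code`, `triton_code`), plus repository metadata (`repo_name`, `repo_link`, `sha`, `licenses`, `stars`). A minimal loading sketch follows; the dataset id below is a hypothetical placeholder, not the real Hub repository name. The raw sample rows are reproduced after it.

```python
# Minimal sketch, assuming the table is published as a Hugging Face dataset.
# "org/dataset-name" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")

row = ds[0]
print(row["entry_point"])         # e.g. "Conv1dSamePadding"
print(row["repo_link"])           # upstream repository for the module
print(row["pytorch_code"][:200])  # original nn.Module source
```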
Conv1dSamePadding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/im/cimrryk5tlijxgch3lnrftbusuhrtt63nyisb4difipuywd7wx5o.py
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# input_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_3, [0, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_1, %primals_2, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_3, buf0, 80, grid=grid(80), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
return (buf2, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
def conv1d_same_padding(input, weight, bias, stride, dilation, groups):
kernel, dilation, stride = weight.size(2), dilation[0], stride[0]
l_out = l_in = input.size(2)
padding = (l_out - 1) * stride - l_in + dilation * (kernel - 1) + 1
if padding % 2 != 0:
input = F.pad(input, [0, 1])
return F.conv1d(input=input, weight=weight, bias=bias, stride=stride,
padding=padding // 2, dilation=dilation, groups=groups)
class Conv1dSamePadding(nn.Conv1d):
"""Represents the "Same" padding functionality from Tensorflow.
See: https://github.com/pytorch/pytorch/issues/3867
Note that the padding argument in the initializer doesn't do anything now
"""
def forward(self, input):
return conv1d_same_padding(input, self.weight, self.bias, self.
stride, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(80)](primals_3, buf0, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, buf0
def conv1d_same_padding(input, weight, bias, stride, dilation, groups):
kernel, dilation, stride = weight.size(2), dilation[0], stride[0]
l_out = l_in = input.size(2)
padding = (l_out - 1) * stride - l_in + dilation * (kernel - 1) + 1
if padding % 2 != 0:
input = F.pad(input, [0, 1])
return F.conv1d(input=input, weight=weight, bias=bias, stride=stride,
padding=padding // 2, dilation=dilation, groups=groups)
class Conv1dSamePaddingNew(nn.Conv1d):
"""Represents the "Same" padding functionality from Tensorflow.
See: https://github.com/pytorch/pytorch/issues/3867
Note that the padding argument in the initializer doesn't do anything now
"""
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| UlysseCoteAllard/LongShortNetworkBipolar | Conv1dSamePadding | false | 5,921 | ["Apache-2.0"] | 1 | f6d146b967b4747f02d6589a0483d6c67394ee87 | https://github.com/UlysseCoteAllard/LongShortNetworkBipolar/tree/f6d146b967b4747f02d6589a0483d6c67394ee87 | import torch
from torch import nn
import torch.nn.functional as F
def conv1d_same_padding(input, weight, bias, stride, dilation, groups):
kernel, dilation, stride = weight.size(2), dilation[0], stride[0]
l_out = l_in = input.size(2)
padding = (l_out - 1) * stride - l_in + dilation * (kernel - 1) + 1
if padding % 2 != 0:
input = F.pad(input, [0, 1])
return F.conv1d(input=input, weight=weight, bias=bias, stride=stride,
padding=padding // 2, dilation=dilation, groups=groups)
class Model(nn.Conv1d):
"""Represents the "Same" padding functionality from Tensorflow.
See: https://github.com/pytorch/pytorch/issues/3867
Note that the padding argument in the initializer doesn't do anything now
"""
def forward(self, input):
return conv1d_same_padding(input, self.weight, self.bias, self.
stride, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
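As a quick check of the padding arithmetic in this entry (a standalone sketch, not part of the stored sample): with `l_in = 4`, `kernel = 4`, `stride = 1`, `dilation = 1`, the formula gives `padding = 3`, which is odd, so the input is padded by one element on the right and the convolution runs with `padding = 1`, matching the `constant_pad_nd` to length 5 and `padding=(1,)` in the generated kernels above.

```python
# Standalone sketch: reproduce the "same" padding arithmetic for the traced
# shapes (l_in = 4, kernel = 4, stride = 1, dilation = 1).
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)
w = torch.rand(4, 4, 4)  # (out_channels, in_channels, kernel_size)
b = torch.rand(4)

padding = (4 - 1) * 1 - 4 + 1 * (4 - 1) + 1  # = 3, odd
x = F.pad(x, [0, 1])                         # pad one element on the right
out = F.conv1d(x, w, b, stride=1, padding=padding // 2)
print(out.shape)  # torch.Size([4, 4, 4]): output length equals input length
```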
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2o/c2o5xjfj64zz7nwpyoieudcndnaiobc6b7jtmt4fzc5odoye7s7g.py
# Topologically Sorted Source Nodes: [conv2d, instance_norm, out], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# instance_norm => add, repeat, rsqrt, var_mean
# out => relu
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 16
rnumel = 3844
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
x1 = xindex % 4
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp1 = tl.load(in_out_ptr0 + (r3 + (3844*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = triton_helpers.welford_reduce(
tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r3 + (3844*x0)), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(
tmp5_mean, tmp5_m2, tmp5_weight, 1
)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp5, xmask)
tmp17 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp8 = tl.load(in_out_ptr0 + (r3 + (3844*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp9 = tmp8 - tmp5
tmp10 = 3844.0
tmp11 = tmp6 / tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp16 = tmp15 * tmp0
tmp18 = tmp16 + tmp17
tmp19 = tl.full([1, 1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tl.store(out_ptr3 + (r3 + (3844*x0)), tmp20, rmask & xmask)
tmp21 = 3844.0
tmp22 = tmp6 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + (x0), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hj/chjzqltjsxd7fqbzbunofdlo6342adq3i2nw3viyvqbpxrkzc3jx.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1, add], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# add => add_4
# conv2d_1 => convolution_1
# out_1 => add_2, repeat_2, rsqrt_1, var_mean_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %slice_4), kwargs = {})
triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 16
rnumel = 3600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
x1 = xindex % 4
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp1 = tl.load(in_out_ptr0 + (r3 + (3600*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = triton_helpers.welford_reduce(
tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r3 + (3600*x0)), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(
tmp5_mean, tmp5_m2, tmp5_weight, 1
)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp5, xmask)
tmp17 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r4 = rindex % 60
r5 = (rindex // 60)
tmp8 = tl.load(in_out_ptr0 + (r3 + (3600*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp19 = tl.load(in_ptr3 + (130 + r4 + (64*r5) + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp9 = tmp8 - tmp5
tmp10 = 3600.0
tmp11 = tmp6 / tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp16 = tmp15 * tmp0
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tl.store(out_ptr3 + (r3 + (3600*x0)), tmp20, rmask & xmask)
tmp21 = 3600.0
tmp22 = tmp6 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + (x0), tmp25, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf2 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 62, 62), (15376, 3844, 62, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, instance_norm, out], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
stream0 = get_raw_stream(0)
triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0.run(buf1, primals_4, primals_3, primals_5, buf2, buf3, buf7, buf6, 16, 3844, grid=grid(16), stream=stream0)
del primals_3
del primals_4
del primals_5
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 60, 60), (14400, 3600, 60, 1))
buf10 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf9 = buf8; del buf8 # reuse
buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 60, 60), (14400, 3600, 60, 1), torch.float32)
buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, out_1, add], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1.run(buf9, primals_8, primals_7, primals_9, primals_1, buf10, buf11, buf15, buf14, 16, 3600, grid=grid(16), stream=stream0)
del primals_7
del primals_8
del primals_9
return (buf15, primals_1, primals_2, primals_6, buf1, buf2, reinterpret_tensor(buf6, (16, ), (1, ), 0), buf7, buf9, buf10, reinterpret_tensor(buf14, (16, ), (1, ), 0), reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
"""Redisual network block for style transfer."""
def __init__(self, nchannels):
"""Create a block of a residual network."""
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(nchannels, nchannels, kernel_size=3)
self.conv2 = nn.Conv2d(nchannels, nchannels, kernel_size=3)
self.norm_conv1 = nn.InstanceNorm2d(nchannels, affine=True)
self.norm_conv2 = nn.InstanceNorm2d(nchannels, affine=True)
self.nonlinearity = nn.ReLU()
def forward(self, x):
"""Forward the input through the block."""
residual = x[:, :, 2:-2, 2:-2]
out = self.nonlinearity(self.norm_conv1(self.conv1(x)))
out = self.norm_conv2(self.conv2(out))
return out + residual
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'nchannels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3,
out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 16
rnumel = 3844
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
x1 = xindex % 4
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp1 = tl.load(in_out_ptr0 + (r3 + 3844 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = (triton_helpers.
welford_reduce(tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0)
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r3 + 3844 * x0), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(tmp5_mean,
tmp5_m2, tmp5_weight, 1)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7_tmp[:, None]
tl.store(out_ptr1 + x0, tmp5, xmask)
tmp17 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp8 = tl.load(in_out_ptr0 + (r3 + 3844 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp9 = tmp8 - tmp5
tmp10 = 3844.0
tmp11 = tmp6 / tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp16 = tmp15 * tmp0
tmp18 = tmp16 + tmp17
tmp19 = tl.full([1, 1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tl.store(out_ptr3 + (r3 + 3844 * x0), tmp20, rmask & xmask)
tmp21 = 3844.0
tmp22 = tmp6 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + x0, tmp25, xmask)
@triton.jit
def triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
xnumel = 16
rnumel = 3600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
x1 = xindex % 4
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp5_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp1 = tl.load(in_out_ptr0 + (r3 + 3600 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp5_mean_next, tmp5_m2_next, tmp5_weight_next = (triton_helpers.
welford_reduce(tmp4, tmp5_mean, tmp5_m2, tmp5_weight, roffset == 0)
)
tmp5_mean = tl.where(rmask & xmask, tmp5_mean_next, tmp5_mean)
tmp5_m2 = tl.where(rmask & xmask, tmp5_m2_next, tmp5_m2)
tmp5_weight = tl.where(rmask & xmask, tmp5_weight_next, tmp5_weight)
tl.store(in_out_ptr0 + (r3 + 3600 * x0), tmp3, rmask & xmask)
tmp5_tmp, tmp6_tmp, tmp7_tmp = triton_helpers.welford(tmp5_mean,
tmp5_m2, tmp5_weight, 1)
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tmp7_tmp[:, None]
tl.store(out_ptr1 + x0, tmp5, xmask)
tmp17 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r4 = rindex % 60
r5 = rindex // 60
tmp8 = tl.load(in_out_ptr0 + (r3 + 3600 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp19 = tl.load(in_ptr3 + (130 + r4 + 64 * r5 + 4096 * x0), rmask &
xmask, eviction_policy='evict_first', other=0.0)
tmp9 = tmp8 - tmp5
tmp10 = 3600.0
tmp11 = tmp6 / tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp16 = tmp15 * tmp0
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tl.store(out_ptr3 + (r3 + 3600 * x0), tmp20, rmask & xmask)
tmp21 = 3600.0
tmp22 = tmp6 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tl.store(out_ptr4 + x0, tmp25, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf2 = empty_strided_cuda((16,), (1,), torch.float32)
buf1 = buf0
del buf0
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf7 = empty_strided_cuda((4, 4, 62, 62), (15376, 3844, 62, 1),
torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
get_raw_stream(0)
triton_red_fused__native_batch_norm_legit_convolution_relu_repeat_0[
grid(16)](buf1, primals_4, primals_3, primals_5, buf2, buf3,
buf7, buf6, 16, 3844, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
del primals_3
del primals_4
del primals_5
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 60, 60), (14400, 3600, 60, 1))
buf10 = empty_strided_cuda((16,), (1,), torch.float32)
buf9 = buf8
del buf8
buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf15 = empty_strided_cuda((4, 4, 60, 60), (14400, 3600, 60, 1),
torch.float32)
buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_red_fused__native_batch_norm_legit_add_convolution_repeat_1[grid
(16)](buf9, primals_8, primals_7, primals_9, primals_1, buf10,
buf11, buf15, buf14, 16, 3600, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
del primals_7
del primals_8
del primals_9
return (buf15, primals_1, primals_2, primals_6, buf1, buf2,
reinterpret_tensor(buf6, (16,), (1,), 0), buf7, buf9, buf10,
reinterpret_tensor(buf14, (16,), (1,), 0), reinterpret_tensor(buf11,
(1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 16,
1, 1), (16, 1, 1, 1), 0))
class ResidualBlockNew(nn.Module):
"""Redisual network block for style transfer."""
def __init__(self, nchannels):
"""Create a block of a residual network."""
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(nchannels, nchannels, kernel_size=3)
self.conv2 = nn.Conv2d(nchannels, nchannels, kernel_size=3)
self.norm_conv1 = nn.InstanceNorm2d(nchannels, affine=True)
self.norm_conv2 = nn.InstanceNorm2d(nchannels, affine=True)
self.nonlinearity = nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_6 = self.conv2.weight
primals_4 = self.conv2.bias
primals_5 = self.norm_conv1.weight
primals_7 = self.norm_conv1.bias
primals_8 = self.norm_conv2.weight
primals_9 = self.norm_conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| TrueMatthewKirkham/face-preserving-style-transfer | ResidualBlock | false | 5,922 | ["MIT"] | 1 | ae8a9509570227ea52776fba85658022124c886c | https://github.com/TrueMatthewKirkham/face-preserving-style-transfer/tree/ae8a9509570227ea52776fba85658022124c886c | import torch
import torch.nn as nn
class Model(nn.Module):
"""Redisual network block for style transfer."""
def __init__(self, nchannels):
"""Create a block of a residual network."""
super().__init__()
self.conv1 = nn.Conv2d(nchannels, nchannels, kernel_size=3)
self.conv2 = nn.Conv2d(nchannels, nchannels, kernel_size=3)
self.norm_conv1 = nn.InstanceNorm2d(nchannels, affine=True)
self.norm_conv2 = nn.InstanceNorm2d(nchannels, affine=True)
self.nonlinearity = nn.ReLU()
def forward(self, x):
"""Forward the input through the block."""
residual = x[:, :, 2:-2, 2:-2]
out = self.nonlinearity(self.norm_conv1(self.conv1(x)))
out = self.norm_conv2(self.conv2(out))
return out + residual
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4]
|
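A standalone shape check for the residual block in this entry: two unpadded 3×3 convolutions shrink the 64×64 input to 60×60, which is why the skip connection crops the input with `x[:, :, 2:-2, 2:-2]` before the add (and why the fused kernel reads the original input starting at flat offset `130 + r4 + 64 * r5`, i.e. row 2, column 2).

```python
# Standalone sketch: shape bookkeeping for the residual block.
import torch
import torch.nn as nn

x = torch.rand(4, 4, 64, 64)
conv1 = nn.Conv2d(4, 4, kernel_size=3)  # valid convolution: 64 -> 62
conv2 = nn.Conv2d(4, 4, kernel_size=3)  # valid convolution: 62 -> 60

out = conv2(conv1(x))
residual = x[:, :, 2:-2, 2:-2]          # crop so shapes match for the add
print(out.shape, residual.shape)        # both torch.Size([4, 4, 60, 60])
```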
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/f2/cf23blhmrqff3yhtwgcpt4rhmfkv3i25ry4dtkijev5qamd7hqxq.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2i/c2i7nezwym7ravbpb5z4vhtgz2n3c3queh7ut3gdufnqdoc5y3uf.py
# Topologically Sorted Source Nodes: [truediv, attn_k], Original ATen: [aten.div, aten.clone]
# Source node to ATen node mapping:
# attn_k => clone
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 2.0), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_div_2 = async_compile.triton('triton_poi_fused_clone_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py
# Topologically Sorted Source Nodes: [attn_k], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_k => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iv/civdgwzpphyda4rs4fr3g6w25bprv7bn4anqgivrgzavi7xr5pdl.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4p/c4p7nmmaeg3do3k4aqmp6mh43rupubtod4kype3ux64cxp3b3uub.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fr/cfrvmolzdvsekgv4ejh4rokymzyx2ewy7rgqbypf7vpbjl3l2peb.py
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# q_5 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16, 4), (4, 1))
assert_size_stride(primals_9, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_4, primals_5, buf2, 64, grid=grid(64), stream=stream0)
del buf0
del buf1
del primals_4
del primals_5
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, attn_k], Original ATen: [aten.div, aten.clone]
triton_poi_fused_clone_div_2.run(buf3, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn_k], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf4, buf7, 64, 4, grid=grid(64, 4), stream=stream0)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_k], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf8, buf9, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf5, buf11, 256, grid=grid(256), stream=stream0)
buf12 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf12, buf13, 256, grid=grid(256), stream=stream0)
del buf12
buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf13, (16, 16), (16, 1), 0), reinterpret_tensor(primals_9, (16, 4), (1, 16), 0), out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf15, primals_1, 64, grid=grid(64), stream=stream0)
return (buf15, buf10, primals_1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 16), (16, 1), 0), primals_9, reinterpret_tensor(buf11, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEncoding(nn.Module):
def __init__(self, max_pos, d_k):
super().__init__()
self.w_rpr = nn.Linear(d_k, max_pos + 1, bias=False)
def __call__(self, q, dist_matrices):
return self.forward(q, dist_matrices)
def forward(self, q, dist_matrices):
"""
:param q: [batch, heads, seq, d_k]
:param dist_matrices: list of dist_matrix, each of size n_edges X n_edges
:return: resampled_q_dot_rpr: [batch, heads, seq, seq]
"""
q_dot_rpr = self.w_rpr(q)
attn_rpr = self.resample_rpr_product(q_dot_rpr, dist_matrices)
return attn_rpr
@staticmethod
def resample_rpr_product(q_dot_rpr, dist_matrices):
"""
:param q_dot_rpr: [batch, heads, seq, max_pos+1]
:param dist_matrices: list of dist_matrix, each of size n_edges X n_edges
:return: resampled_q_dot_rpr: [batch, heads, seq, seq]
"""
bs, _n_heads, max_seq, _ = q_dot_rpr.shape
max_pos = q_dot_rpr.shape[-1] - 1
seq_lens = np.array([d.shape[0] for d in dist_matrices])
if (seq_lens == max_seq).all():
pos_inds = np.stack(dist_matrices)
else:
pos_inds = np.ones((bs, max_seq, max_seq), dtype=np.int32
) * np.iinfo(np.int32).max
for i_b in range(bs):
dist_matrix = dist_matrices[i_b]
n_edges = dist_matrix.shape[0]
pos_inds[i_b, :n_edges, :n_edges] = dist_matrix
pos_inds[pos_inds > max_pos] = max_pos
batch_inds = np.arange(bs)[:, None, None]
edge_inds = np.arange(max_seq)[None, :, None]
resampled_q_dot_rpr = q_dot_rpr[batch_inds, :, edge_inds, pos_inds
].permute(0, 3, 1, 2)
return resampled_q_dot_rpr
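# Note on the advanced indexing above: after the permute,
# resampled_q_dot_rpr[b, h, i, j] == q_dot_rpr[b, h, i, pos_inds[b, i, j]],
# i.e. every edge pair (i, j) selects the column given by its clipped pairwise distance.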
class PositionalScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention with optional positional encodings """
def __init__(self, temperature, positional_encoding=None, attn_dropout=0.1
):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.positional_encoding = positional_encoding
def forward(self, q, k, v, mask=None, dist_matrices=None):
"""
q: [batch, heads, seq, d_k] queries
k: [batch, heads, seq, d_k] keys
v: [batch, heads, seq, d_v] values
mask: [batch, 1, seq, seq] for each edge, which other edges should be accounted for. "None" means all of them.
mask is important when using local attention, or when the meshes are of different sizes.
rpr: [batch, seq, seq, d_k] positional representations
"""
attn_k = torch.matmul(q / self.temperature, k.transpose(2, 3))
if self.positional_encoding is None:
attn = attn_k
else:
attn_rpr = self.positional_encoding(q / self.temperature,
dist_matrices)
attn = attn_k + attn_rpr
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention module
from https://github.com/jadore801120/attention-is-all-you-need-pytorch
by Yu-Hsiang Huang.
use_values_as_is is our addition.
"""
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1,
use_values_as_is=False, use_positional_encoding=False,
max_relative_position=8):
super().__init__()
self.attention_type = type
self.n_head = n_head
self.d_k = d_k
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
if not use_values_as_is:
self.d_v = d_v
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
else:
self.d_v = d_model
self.w_vs = lambda x: self.__repeat_single_axis(x, -1, n_head)
self.fc = lambda x: self.__average_head_results(x, n_head)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
positional_encoding = None
if use_positional_encoding:
positional_encoding = PositionalEncoding(max_relative_position, d_k
)
self.attention = PositionalScaledDotProductAttention(temperature=
d_k ** 0.5, positional_encoding=positional_encoding)
@staticmethod
def __repeat_single_axis(x, axis, n_rep):
rep_sizes = [1] * x.ndim
rep_sizes[axis] = n_rep
x_rep = x.repeat(rep_sizes)
return x_rep
@staticmethod
def __average_head_results(x, n_head):
shape = list(x.shape)[:-1] + [n_head, -1]
avg_x = x.view(shape).mean(-2)
return avg_x
def forward(self, q, k, v, mask=None, dist_matrices=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.layer_norm(q)
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask, dist_matrices=
dist_matrices)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
return q, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
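# Illustrative usage sketch (an assumption for clarity, not code from the original repo):
# running the eager module with the harness shapes defined above.
if __name__ == '__main__':
    _mha = MultiHeadAttention(**get_init_inputs()[1])
    _out, _attn = _mha(*get_inputs())
    assert _out.shape == (4, 4, 4) and _attn.shape == (4, 4, 4, 4)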
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
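# triton_poi_fused_native_layer_norm_0: reduction half of the LayerNorm on q; computes the
# per-row mean and inverse std (rsqrt(var + 1e-06)) over the last dimension of size 4.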
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
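# triton_poi_fused_native_layer_norm_1: elementwise half of the LayerNorm; applies
# (x - mean) * rstd * weight + bias using the statistics produced by the kernel above.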
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
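# triton_poi_fused_clone_div_2: fuses q / temperature (temperature = sqrt(d_k) = 2, hence the
# 0.5 factor) with the contiguous copy that moves the head axis ahead of the sequence axis.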
@triton.jit
def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
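# triton_poi_fused_clone_3: permuted copy of the key projection into the layout consumed by
# the q @ k^T batched matmul below.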
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
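# triton_poi_fused__softmax_4: first half of a numerically stable softmax over the last
# dimension (size 4); subtracts the row max and exponentiates.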
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
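# triton_poi_fused__softmax_5: second half of the softmax; divides each exponentiated value
# by its row sum.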
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
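# triton_poi_fused_clone_6: contiguous copy that swaps the two middle axes of a (4, 4, 4, 4)
# tensor; used once to lay out v for the attn @ v matmul and once for the
# transpose(1, 2).contiguous() preceding the output projection.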
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
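# triton_poi_fused_add_7: in-place residual add; accumulates the original q (primals_1) into
# the output-projection result.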
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
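# call() orchestrates the compiled forward pass: LayerNorm of q, Q/K/V projections via extern
# matmuls, scaled q @ k^T, two-stage softmax, attn @ v, head merge, output projection, and the
# residual add. It returns the attended output, the attention weights, and tensors saved for
# the backward pass.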
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16, 4), (4, 1))
assert_size_stride(primals_9, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0,
buf1, primals_4, primals_5, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_4
del primals_5
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_div_2[grid(256)](buf3, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf11 = buf9
del buf9
triton_poi_fused_clone_6[grid(256)](buf5, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12
)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_6[grid(256)](buf12, buf13, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf12
buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_9, (16, 4), (1, 16), 0), out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0)
del buf14
triton_poi_fused_add_7[grid(64)](buf15, primals_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf15, buf10, primals_1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf10, reinterpret_tensor(buf13, (16, 16), (16, 1), 0
), primals_9, reinterpret_tensor(buf11, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6
class PositionalEncoding(nn.Module):
def __init__(self, max_pos, d_k):
super().__init__()
self.w_rpr = nn.Linear(d_k, max_pos + 1, bias=False)
def __call__(self, q, dist_matrices):
return self.forward(q, dist_matrices)
def forward(self, q, dist_matrices):
"""
:param q: [batch, heads, seq, d_k]
:param dist_matrices: list of dist_matrix, each of size n_edges X n_edges
:return: resampled_q_dot_rpr: [batch, heads, seq, seq]
"""
q_dot_rpr = self.w_rpr(q)
attn_rpr = self.resample_rpr_product(q_dot_rpr, dist_matrices)
return attn_rpr
@staticmethod
def resample_rpr_product(q_dot_rpr, dist_matrices):
"""
:param q_dot_rpr: [batch, heads, seq, max_pos+1]
:param dist_matrices: list of dist_matrix, each of size n_edges X n_edges
:return: resampled_q_dot_rpr: [batch, heads, seq, seq]
"""
bs, _n_heads, max_seq, _ = q_dot_rpr.shape
max_pos = q_dot_rpr.shape[-1] - 1
seq_lens = np.array([d.shape[0] for d in dist_matrices])
if (seq_lens == max_seq).all():
pos_inds = np.stack(dist_matrices)
else:
pos_inds = np.ones((bs, max_seq, max_seq), dtype=np.int32
) * np.iinfo(np.int32).max
for i_b in range(bs):
dist_matrix = dist_matrices[i_b]
n_edges = dist_matrix.shape[0]
pos_inds[i_b, :n_edges, :n_edges] = dist_matrix
pos_inds[pos_inds > max_pos] = max_pos
batch_inds = np.arange(bs)[:, None, None]
edge_inds = np.arange(max_seq)[None, :, None]
resampled_q_dot_rpr = q_dot_rpr[batch_inds, :, edge_inds, pos_inds
].permute(0, 3, 1, 2)
return resampled_q_dot_rpr
class PositionalScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention with optional positional encodings """
def __init__(self, temperature, positional_encoding=None, attn_dropout=0.1
):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.positional_encoding = positional_encoding
def forward(self, q, k, v, mask=None, dist_matrices=None):
"""
q: [batch, heads, seq, d_k] queries
k: [batch, heads, seq, d_k] keys
v: [batch, heads, seq, d_v] values
mask: [batch, 1, seq, seq] for each edge, which other edges should be accounted for. "None" means all of them.
mask is important when using local attention, or when the meshes are of different sizes.
rpr: [batch, seq, seq, d_k] positional representations
"""
attn_k = torch.matmul(q / self.temperature, k.transpose(2, 3))
if self.positional_encoding is None:
attn = attn_k
else:
attn_rpr = self.positional_encoding(q / self.temperature,
dist_matrices)
attn = attn_k + attn_rpr
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttentionNew(nn.Module):
"""
Multi-Head Attention module
from https://github.com/jadore801120/attention-is-all-you-need-pytorch
by Yu-Hsiang Huang.
use_values_as_is is our addition.
"""
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1,
use_values_as_is=False, use_positional_encoding=False,
max_relative_position=8):
super().__init__()
self.attention_type = type
self.n_head = n_head
self.d_k = d_k
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
if not use_values_as_is:
self.d_v = d_v
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
else:
self.d_v = d_model
self.w_vs = lambda x: self.__repeat_single_axis(x, -1, n_head)
self.fc = lambda x: self.__average_head_results(x, n_head)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
positional_encoding = None
if use_positional_encoding:
positional_encoding = PositionalEncoding(max_relative_position, d_k
)
self.attention = PositionalScaledDotProductAttention(temperature=
d_k ** 0.5, positional_encoding=positional_encoding)
@staticmethod
def __repeat_single_axis(x, axis, n_rep):
rep_sizes = [1] * x.ndim
rep_sizes[axis] = n_rep
x_rep = x.repeat(rep_sizes)
return x_rep
@staticmethod
def __average_head_results(x, n_head):
shape = list(x.shape)[:-1] + [n_head, -1]
avg_x = x.view(shape).mean(-2)
return avg_x
def forward(self, input_0, input_1, input_2):
primals_6 = self.w_qs.weight
primals_7 = self.w_ks.weight
primals_8 = self.w_vs.weight
primals_9 = self.fc.weight
primals_4 = self.layer_norm.weight
primals_5 = self.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| TomerRonen34/MeshCNN | MultiHeadAttention | false | 5,923 | [
"MIT"
] | 1 | 8c50f3804c48044b78572d652a42184640e904d9 | https://github.com/TomerRonen34/MeshCNN/tree/8c50f3804c48044b78572d652a42184640e904d9 | import torch
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEncoding(nn.Module):
def __init__(self, max_pos, d_k):
super().__init__()
self.w_rpr = nn.Linear(d_k, max_pos + 1, bias=False)
def __call__(self, q, dist_matrices):
return self.forward(q, dist_matrices)
def forward(self, q, dist_matrices):
"""
:param q: [batch, heads, seq, d_k]
:param dist_matrices: list of dist_matrix, each of size n_edges X n_edges
:return: resampled_q_dot_rpr: [batch, heads, seq, seq]
"""
q_dot_rpr = self.w_rpr(q)
attn_rpr = self.resample_rpr_product(q_dot_rpr, dist_matrices)
return attn_rpr
@staticmethod
def resample_rpr_product(q_dot_rpr, dist_matrices):
"""
:param q_dot_rpr: [batch, heads, seq, max_pos+1]
:param dist_matrices: list of dist_matrix, each of size n_edges X n_edges
:return: resampled_q_dot_rpr: [batch, heads, seq, seq]
"""
bs, _n_heads, max_seq, _ = q_dot_rpr.shape
max_pos = q_dot_rpr.shape[-1] - 1
seq_lens = np.array([d.shape[0] for d in dist_matrices])
if (seq_lens == max_seq).all():
pos_inds = np.stack(dist_matrices)
else:
pos_inds = np.ones((bs, max_seq, max_seq), dtype=np.int32
) * np.iinfo(np.int32).max
for i_b in range(bs):
dist_matrix = dist_matrices[i_b]
n_edges = dist_matrix.shape[0]
pos_inds[i_b, :n_edges, :n_edges] = dist_matrix
pos_inds[pos_inds > max_pos] = max_pos
batch_inds = np.arange(bs)[:, None, None]
edge_inds = np.arange(max_seq)[None, :, None]
resampled_q_dot_rpr = q_dot_rpr[batch_inds, :, edge_inds, pos_inds
].permute(0, 3, 1, 2)
return resampled_q_dot_rpr
class PositionalScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention with optional positional encodings """
def __init__(self, temperature, positional_encoding=None, attn_dropout=0.1
):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.positional_encoding = positional_encoding
def forward(self, q, k, v, mask=None, dist_matrices=None):
"""
q: [batch, heads, seq, d_k] queries
k: [batch, heads, seq, d_k] keys
v: [batch, heads, seq, d_v] values
mask: [batch, 1, seq, seq] for each edge, which other edges should be accounted for. "None" means all of them.
mask is important when using local attention, or when the meshes are of different sizes.
rpr: [batch, seq, seq, d_k] positional representations
"""
attn_k = torch.matmul(q / self.temperature, k.transpose(2, 3))
if self.positional_encoding is None:
attn = attn_k
else:
attn_rpr = self.positional_encoding(q / self.temperature,
dist_matrices)
attn = attn_k + attn_rpr
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class Model(nn.Module):
"""
Multi-Head Attention module
from https://github.com/jadore801120/attention-is-all-you-need-pytorch
by Yu-Hsiang Huang.
use_values_as_is is our addition.
"""
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1,
use_values_as_is=False, use_positional_encoding=False,
max_relative_position=8):
super().__init__()
self.attention_type = type
self.n_head = n_head
self.d_k = d_k
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
if not use_values_as_is:
# ... truncated (>4000 chars) for memory efficiency |
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l4/cl4b2qmegciihafkh7hzuy52pdx4j53yd4lmklixpticbzk2nyq2.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (16, 8), (8, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 16), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf3)
del primals_6
return (buf3, buf0, buf2, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class FFN(nn.Module):
def __init__(self, d):
super().__init__()
self.fc_1 = nn.Linear(2 * d, 4 * d)
self.drop = nn.Dropout(0.1)
self.fc_2 = nn.Linear(4 * d, d)
def forward(self, x_1, x_2):
x = self.fc_1(torch.cat((x_1, x_2), 1))
x = F.relu(x)
x = self.drop(x)
x = self.fc_2(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d': 4}]
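# Illustrative usage sketch (an assumption for clarity, not code from the original repo):
# FFN(d=4) maps two (4, 4) inputs through Linear(8, 16) -> ReLU -> Dropout -> Linear(16, 4).
if __name__ == '__main__':
    _ffn = FFN(**get_init_inputs()[1])
    _out = _ffn(*get_inputs())
    assert _out.shape == (4, 4)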
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
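# triton_poi_fused_cat_0: builds the (4, 8) buffer for torch.cat((x_1, x_2), 1); the first
# four columns come from in_ptr0 and the last four from in_ptr1.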
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
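# triton_poi_fused_relu_1: fused bias add + ReLU applied in place to the fc_1 matmul output.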
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
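# call() concatenates the two inputs, runs fc_1 as an extern matmul, applies the fused
# bias + ReLU, then finishes with fc_2 via addmm; the Dropout layer does not appear in this
# traced graph.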
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (16, 8), (8, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 16), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(16, 4), (1, 16), 0), alpha=1, beta=1, out=buf3)
del primals_6
return buf3, buf0, buf2, primals_5
class FFNNew(nn.Module):
def __init__(self, d):
super().__init__()
self.fc_1 = nn.Linear(2 * d, 4 * d)
self.drop = nn.Dropout(0.1)
self.fc_2 = nn.Linear(4 * d, d)
def forward(self, input_0, input_1):
primals_3 = self.fc_1.weight
primals_4 = self.fc_1.bias
primals_5 = self.fc_2.weight
primals_6 = self.fc_2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| VKCOM/TopicsDataset | FFN | false | 5,924 | [
"MIT"
] | 1 | 149919321ba61a8f17b22f62f60f4aedec43d72b | https://github.com/VKCOM/TopicsDataset/tree/149919321ba61a8f17b22f62f60f4aedec43d72b | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d):
super().__init__()
self.fc_1 = nn.Linear(2 * d, 4 * d)
self.drop = nn.Dropout(0.1)
self.fc_2 = nn.Linear(4 * d, d)
def forward(self, x_1, x_2):
x = self.fc_1(torch.cat((x_1, x_2), 1))
x = F.relu(x)
x = self.drop(x)
x = self.fc_2(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
GSAHelper | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ap/capazs53vf7hafcoa3lzpsnuw7vvpg5foinomftp5aheilxucgrj.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, %addmm_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aq/caq6h3dan4vzhhsrm4lvbcqdagc4iozbtbt5fld7rswxmgjafkd4.py
# Topologically Sorted Source Nodes: [kq, k_2, q_2], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# k_2 => mul_1
# kq => sigmoid
# q_2 => mul_2
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_2,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp4 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/r2/cr27m63n6a7wbqa5bchmoh4ayk7vptybueylj27efdfoaxonqpmx.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div_1, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = tmp7 / tmp10
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp11, None)
''', device_str='cuda')
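# For reference, a minimal eager-mode sketch of the reduction fused in
# triton_per_fused__softmax_2: scale by 1/sqrt(d) (0.5 for d=4), then a numerically
# stable softmax over the flattened 4x4 score matrix, mirroring
# nn.Softmax()(torch.flatten(mul)) in GSAHelper.forward. Illustrative only.
def _scaled_softmax_reference_sketch(scores, d=4):
    scaled = scores.flatten() * d ** -0.5
    shifted = scaled - scaled.max()  # subtract the max for numerical stability
    exp = shifted.exp()
    return (exp / exp.sum()).view_as(scores)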
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf2, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_8
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kq, k_2, q_2], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(primals_1, buf3, primals_6, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf4, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6)
buf9 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf6, buf9, 1, 16, grid=grid(1), stream=stream0)
del buf6
return (reinterpret_tensor(buf9, (4, 4), (4, 1), 0), primals_1, primals_6, buf0, buf1, buf2, buf3, buf4, buf9, buf5, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class GSAHelper(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.fc_kq = nn.Linear(self.d, self.d)
def forward(self, k, q):
m = k.shape[0]
k_1 = self.fc_k(k)
q_1 = self.fc_q(q)
kq = nn.Sigmoid()(self.fc_kq(torch.mul(k_1, q_1)))
k_2 = torch.mul(k, kq)
q_2 = torch.mul(q, kq)
mul = torch.mm(k_2, torch.t(q_2)) / self.d ** (1.0 / 2)
a = nn.Softmax()(torch.flatten(mul)).view(m, m)
return a
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d': 4}]
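# Illustrative usage sketch (the helper name is ours, not part of the original module):
# it wires get_init_inputs()/get_inputs() into GSAHelper the way a test harness might.
def _example_forward_pass():
    args, kwargs = get_init_inputs()
    helper = GSAHelper(*args, **kwargs)
    k, q = get_inputs()
    return helper(k, q)  # 4x4 attention matrix whose 16 entries sum to 1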
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp4 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = tmp7 / tmp10
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp11, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf0, buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf2, reinterpret_tensor(primals_7,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_8
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(16)](primals_1, buf3, primals_6,
buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(buf5, (4, 4), (1, 4), 0),
out=buf6)
buf9 = empty_strided_cuda((16,), (1,), torch.float32)
triton_per_fused__softmax_2[grid(1)](buf6, buf9, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf6
return (reinterpret_tensor(buf9, (4, 4), (4, 1), 0), primals_1,
primals_6, buf0, buf1, buf2, buf3, buf4, buf9, buf5, primals_7)
class GSAHelperNew(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.fc_kq = nn.Linear(self.d, self.d)
def forward(self, input_0, input_1):
primals_1 = self.fc_k.weight
primals_3 = self.fc_k.bias
primals_2 = self.fc_q.weight
primals_5 = self.fc_q.bias
primals_4 = self.fc_kq.weight
primals_8 = self.fc_kq.bias
primals_6 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| VKCOM/TopicsDataset | GSAHelper | false | 5,925 | [
"MIT"
] | 1 | 149919321ba61a8f17b22f62f60f4aedec43d72b | https://github.com/VKCOM/TopicsDataset/tree/149919321ba61a8f17b22f62f60f4aedec43d72b | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.fc_kq = nn.Linear(self.d, self.d)
def forward(self, k, q):
m = k.shape[0]
k_1 = self.fc_k(k)
q_1 = self.fc_q(q)
kq = nn.Sigmoid()(self.fc_kq(torch.mul(k_1, q_1)))
k_2 = torch.mul(k, kq)
q_2 = torch.mul(q, kq)
mul = torch.mm(k_2, torch.t(q_2)) / self.d ** (1.0 / 2)
a = nn.Softmax()(torch.flatten(mul)).view(m, m)
return a
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
PoolFormerBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dr/cdrbbq25gaacwdmuqqn76ytppvbzlwbqwo7aazovogwtjetsi3kf.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add, add_1, mul_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_7), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_4), kwargs = {})
triton_per_fused_native_group_norm_0 = async_compile.triton('triton_per_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + (64*x0)), tmp27, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
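# For reference, a minimal eager-mode sketch of the normalization fused in
# triton_per_fused_native_group_norm_0: GroupNorm with a single group normalizes each
# sample over all C*H*W values (64 here) and then applies the per-channel affine
# weight/bias. Names are illustrative; relies on the module-level `import torch`.
def _group_norm_reference_sketch(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=(1, 2, 3), keepdim=True)
    var = x.var(dim=(1, 2, 3), unbiased=False, keepdim=True)
    x_hat = (x - mean) * torch.rsqrt(var + eps)
    return x_hat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)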
# kernel path: runs/run_shard_4/inductor_cache/ou/coupyg345piz42etl5al6lwpef5kkccwwnxo3krwbluktyaryaxz.py
# Topologically Sorted Source Nodes: [avg_pool2d, group_norm_1], Original ATen: [aten.avg_pool2d, aten.native_group_norm]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# group_norm_1 => add_3, add_4, mul_4, rsqrt_1, var_mean_1
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%add_1, [3, 3], [1, 1], [1, 1], False, False), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_15), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %unsqueeze_12), kwargs = {})
triton_per_fused_avg_pool2d_native_group_norm_1 = async_compile.triton('triton_per_fused_avg_pool2d_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_avg_pool2d_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_avg_pool2d_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = (rindex // 4) % 4
r1 = rindex % 4
r6 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp54 = tl.load(in_ptr1 + (r6 + (64*x0)), xmask, other=0.0)
tmp55 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (r6 + (64*x0)), xmask, other=0.0)
tmp83 = tl.load(in_ptr3 + (r3), None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr4 + (r3), None, eviction_policy='evict_last')
tmp0 = (-1) + r2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + r1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + r6 + (64*x0)), tmp10 & xmask, other=0.0)
tmp12 = r1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + r6 + (64*x0)), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + r1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + r6 + (64*x0)), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = r2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + r6 + (64*x0)), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (r6 + (64*x0)), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + r6 + (64*x0)), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + r2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + r6 + (64*x0)), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + r6 + (64*x0)), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + r6 + (64*x0)), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
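    # tmp52 below is the number of in-bounds taps of the 3x3 window centred at (r2, r1),
    # i.e. the divisor avg_pool2d uses when count_include_pad=False.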
tmp52 = (((0) * ((0) >= ((-1) + r1)) + ((-1) + r1) * (((-1) + r1) > (0)))*((0) * ((0) >= ((-1) + r2)) + ((-1) + r2) * (((-1) + r2) > (0)))) + (((4) * ((4) <= (2 + r1)) + (2 + r1) * ((2 + r1) < (4)))*((4) * ((4) <= (2 + r2)) + (2 + r2) * ((2 + r2) < (4)))) + ((-1)*((0) * ((0) >= ((-1) + r1)) + ((-1) + r1) * (((-1) + r1) > (0)))*((4) * ((4) <= (2 + r2)) + (2 + r2) * ((2 + r2) < (4)))) + ((-1)*((0) * ((0) >= ((-1) + r2)) + ((-1) + r2) * (((-1) + r2) > (0)))*((4) * ((4) <= (2 + r1)) + (2 + r1) * ((2 + r1) < (4))))
tmp53 = tmp51 / tmp52
tmp57 = tmp53 - tmp56
tmp58 = tmp55 * tmp57
tmp59 = tmp54 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = tl.where(xmask, tmp60, 0)
tmp63 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp65 = tl.where(xmask, tmp63, 0)
tmp66 = tl.sum(tmp65, 1)[:, None]
tmp67 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp66 / tmp68
tmp70 = tmp60 - tmp69
tmp71 = tmp70 * tmp70
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.where(xmask, tmp72, 0)
tmp75 = tl.sum(tmp74, 1)[:, None]
tmp76 = tmp59 - tmp69
tmp77 = 64.0
tmp78 = tmp75 / tmp77
tmp79 = 1e-05
tmp80 = tmp78 + tmp79
tmp81 = libdevice.rsqrt(tmp80)
tmp82 = tmp76 * tmp81
tmp84 = tmp82 * tmp83
tmp86 = tmp84 + tmp85
tl.store(out_ptr0 + (r6 + (64*x0)), tmp53, xmask)
tl.store(out_ptr3 + (r6 + (64*x0)), tmp86, xmask)
tl.store(out_ptr4 + (x0), tmp81, xmask)
tl.store(out_ptr1 + (x0), tmp69, xmask)
''', device_str='cuda')
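# For reference, a minimal eager-mode sketch of the pooling branch fused above. With
# count_include_pad=False the divisor (the long tmp52 expression in the kernel) is the
# number of in-bounds taps of each 3x3 window, which F.avg_pool2d derives for us.
# Illustrative only; relies on the module-level `import torch`.
def _pooling_token_mixer_reference_sketch(x_normed):
    pooled = torch.nn.functional.avg_pool2d(
        x_normed, kernel_size=3, stride=1, padding=1, count_include_pad=False)
    return pooled - x_normed  # Pooling.forward: pool(x) - x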
# kernel path: runs/run_shard_4/inductor_cache/gd/cgdslzbvz4m7vmsmypc7vpzz4mhpkezpihgu3s34qnd5bo5vg7h5.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.gelu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => add_5, erf, mul_5, mul_6, mul_7
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_4, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_5), kwargs = {})
triton_poi_fused_convolution_gelu_2 = async_compile.triton('triton_poi_fused_convolution_gelu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_gelu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_gelu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
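# For reference, a minimal eager-mode sketch of the activation fused in
# triton_poi_fused_convolution_gelu_2: the bias add of the 1x1 convolution followed by
# the exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))). Illustrative only.
def _gelu_reference_sketch(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))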
# kernel path: runs/run_shard_4/inductor_cache/2s/c2sr6yssngw5ceeda7lntygiwg4bblf5mrmsznbnzvcgr3mzmwrc.py
# Topologically Sorted Source Nodes: [sub, mul, x, x_4, mul_1, x_6], Original ATen: [aten.sub, aten.mul, aten.add, aten.convolution]
# Source node to ATen node mapping:
# mul => mul_2
# mul_1 => mul_8
# sub => sub_1
# x => add_2
# x_4 => convolution_1
# x_6 => add_6
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d, %add_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %sub_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %mul_2), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_7, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_9, %convolution_1), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_8), kwargs = {})
triton_poi_fused_add_convolution_mul_sub_3 = async_compile.triton('triton_poi_fused_add_convolution_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sub_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x3), xmask)
tmp6 = tl.load(in_ptr4 + (x3), xmask)
tmp10 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp7 = tmp5 - tmp6
tmp8 = tmp4 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
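# For reference, a minimal eager-mode sketch of the epilogue fused in
# triton_poi_fused_add_convolution_mul_sub_3: both LayerScale residual additions of
# PoolFormerBlock.forward, x + s1 * token_mixer(norm1(x)) and then + s2 * mlp(norm2(x)).
# Tensor names are illustrative only.
def _layer_scale_residual_reference_sketch(x, scale_1, mixer_out, scale_2, mlp_out):
    y = x + scale_1.unsqueeze(-1).unsqueeze(-1) * mixer_out
    return y + scale_2.unsqueeze(-1).unsqueeze(-1) * mlp_out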
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_0.run(primals_4, primals_2, primals_3, buf0, buf3, buf16, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [avg_pool2d, group_norm_1], Original ATen: [aten.avg_pool2d, aten.native_group_norm]
triton_per_fused_avg_pool2d_native_group_norm_1.run(buf3, primals_4, primals_1, primals_6, primals_7, buf4, buf5, buf8, buf9, 4, 64, grid=grid(4), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 4, 4), (256, 16, 4, 1))
buf11 = buf10; del buf10 # reuse
buf12 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.gelu]
triton_poi_fused_convolution_gelu_2.run(buf11, primals_9, buf12, 1024, grid=grid(1024), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 16, 4, 1))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul, x, x_4, mul_1, x_6], Original ATen: [aten.sub, aten.mul, aten.add, aten.convolution]
triton_poi_fused_add_convolution_mul_sub_3.run(buf14, primals_11, primals_4, primals_1, buf4, buf3, primals_5, buf15, 256, grid=grid(256), stream=stream0)
del primals_11
return (buf15, primals_1, primals_4, primals_5, primals_6, primals_8, primals_10, buf3, buf4, buf8, reinterpret_tensor(buf5, (4, 1), (1, 1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, buf12, buf14, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf16, (4, 1, 1), (1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import warnings
import torch.nn as nn
from timm.models.layers import DropPath  # stochastic depth; only exercised when drop_path > 0.0
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlock(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
--drop path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, x):
if self.use_layer_scale:
x = x + self.drop_path(self.layer_scale_1.unsqueeze(-1).
unsqueeze(-1) * self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.layer_scale_2.unsqueeze(-1).
unsqueeze(-1) * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
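# Illustrative usage sketch (the helper name is ours, not part of the original module):
# it builds the block from get_init_inputs() and runs one forward pass on the sample
# input from get_inputs(). With the default drop_path=0.0 no stochastic depth is applied.
def _example_forward_pass():
    args, kwargs = get_init_inputs()
    block = PoolFormerBlock(*args, **kwargs)
    x, = get_inputs()
    return block(x)  # same shape as the input, here [4, 4, 4, 4]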
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import warnings
import torch.nn as nn
from timm.models.layers import DropPath  # stochastic depth; only exercised when drop_path > 0.0
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_avg_pool2d_native_group_norm_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 4 % 4
r1 = rindex % 4
r6 = rindex
x0 = xindex
r3 = rindex // 16
tmp54 = tl.load(in_ptr1 + (r6 + 64 * x0), xmask, other=0.0)
tmp55 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (r6 + 64 * x0), xmask, other=0.0)
tmp83 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr4 + r3, None, eviction_policy='evict_last')
tmp0 = -1 + r2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + r1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + r6 + 64 * x0), tmp10 & xmask, other=0.0)
tmp12 = r1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + r6 + 64 * x0), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + r1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + r6 + 64 * x0), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = r2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + r6 + 64 * x0), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (r6 + 64 * x0), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + r6 + 64 * x0), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + r2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + r6 + 64 * x0), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + r6 + 64 * x0), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + r6 + 64 * x0), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (0 * (0 >= -
1 + r2) + (-1 + r2) * (-1 + r2 > 0)) + (4 * (4 <= 2 + r1) + (2 + r1
) * (2 + r1 < 4)) * (4 * (4 <= 2 + r2) + (2 + r2) * (2 + r2 < 4)
) + -1 * (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (4 * (4 <=
2 + r2) + (2 + r2) * (2 + r2 < 4)) + -1 * (0 * (0 >= -1 + r2) + (-1 +
r2) * (-1 + r2 > 0)) * (4 * (4 <= 2 + r1) + (2 + r1) * (2 + r1 < 4))
tmp53 = tmp51 / tmp52
tmp57 = tmp53 - tmp56
tmp58 = tmp55 * tmp57
tmp59 = tmp54 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tl.where(xmask, tmp60, 0)
tmp63 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp65 = tl.where(xmask, tmp63, 0)
tmp66 = tl.sum(tmp65, 1)[:, None]
tmp67 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp66 / tmp68
tmp70 = tmp60 - tmp69
tmp71 = tmp70 * tmp70
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.where(xmask, tmp72, 0)
tmp75 = tl.sum(tmp74, 1)[:, None]
tmp76 = tmp59 - tmp69
tmp77 = 64.0
tmp78 = tmp75 / tmp77
tmp79 = 1e-05
tmp80 = tmp78 + tmp79
tmp81 = libdevice.rsqrt(tmp80)
tmp82 = tmp76 * tmp81
tmp84 = tmp82 * tmp83
tmp86 = tmp84 + tmp85
tl.store(out_ptr0 + (r6 + 64 * x0), tmp53, xmask)
tl.store(out_ptr3 + (r6 + 64 * x0), tmp86, xmask)
tl.store(out_ptr4 + x0, tmp81, xmask)
tl.store(out_ptr1 + x0, tmp69, xmask)
@triton.jit
def triton_poi_fused_convolution_gelu_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask)
tmp6 = tl.load(in_ptr4 + x3, xmask)
tmp10 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp7 = tmp5 - tmp6
tmp8 = tmp4 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(4)](primals_4, primals_2,
primals_3, buf0, buf3, buf16, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_avg_pool2d_native_group_norm_1[grid(4)](buf3,
primals_4, primals_1, primals_6, primals_7, buf4, buf5, buf8,
buf9, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 4, 4), (256, 16, 4, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_gelu_2[grid(1024)](buf11, primals_9,
buf12, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 16, 4, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_sub_3[grid(256)](buf14,
primals_11, primals_4, primals_1, buf4, buf3, primals_5, buf15,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
return (buf15, primals_1, primals_4, primals_5, primals_6, primals_8,
primals_10, buf3, buf4, buf8, reinterpret_tensor(buf5, (4, 1), (1,
1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, buf12,
buf14, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0),
reinterpret_tensor(buf16, (4, 1, 1), (1, 1, 1), 0))
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlockNew(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
--drop path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, input_0):
primals_1 = self.layer_scale_1
primals_2 = self.layer_scale_2
primals_3 = self.norm1.weight
primals_5 = self.norm1.bias
primals_6 = self.norm2.weight
primals_7 = self.norm2.bias
primals_8 = self.mlp.fc1.weight
primals_9 = self.mlp.fc1.bias
primals_10 = self.mlp.fc2.weight
primals_11 = self.mlp.fc2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| TranNhiem/MVAR_SSL | PoolFormerBlock | false | 5,926 | [
"MIT"
] | 1 | 339964db4d40f06a92866675ff99ef67cd968cca | https://github.com/TranNhiem/MVAR_SSL/tree/339964db4d40f06a92866675ff99ef67cd968cca | import math
import torch
import warnings
import torch.nn as nn
from timm.models.layers import DropPath  # stochastic depth; only exercised when drop_path > 0.0
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Model(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
--drop path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale
# ... truncated (>4000 chars) for memory efficiency |
ActorCritic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# a => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/53/c5336tes3fejn37nhb2iijuur7spy3qcasflywbbqklxwgjxpcvr.py
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# std => exp
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {})
triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + (x2), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (1, 4), (4, 1))
assert_size_stride(primals_14, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.exp]
triton_poi_fused_exp_1.run(primals_8, buf5, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [std, action], Original ATen: [aten.exp, aten.normal]
buf6 = torch.ops.aten.normal.Tensor_Tensor(reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf9, primals_10, 256, grid=grid(256), stream=stream0)
del primals_10
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [v_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf11, primals_12, 256, grid=grid(256), stream=stream0)
del primals_12
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_14
return (reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf7, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf9, buf11, primals_13, primals_11, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def log_normal_density(x, mean, log_std, std):
"""returns guassian density given x on log scale"""
variance = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * variance) - 0.5 * np.log(2 * np.pi
) - log_std
log_density = log_density.sum(dim=1, keepdim=True)
return log_density
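# A minimal consistency check (illustrative only; the helper name below is not part of the
# original module): the hand-rolled log density above should match torch.distributions.Normal
# up to the final sum over the action dimension.
def _check_log_normal_density():
    mean = torch.zeros(2, 3)
    log_std = torch.zeros(2, 3)
    std = log_std.exp()
    x = torch.randn(2, 3)
    ours = log_normal_density(x, mean, log_std, std)
    ref = torch.distributions.Normal(mean, std).log_prob(x).sum(dim=1, keepdim=True)
    assert torch.allclose(ours, ref, atol=1e-05)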
class ActorCritic(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(ActorCritic, self).__init__()
self.logstd = nn.Parameter(torch.zeros(num_actions))
self.a_fc1 = nn.Linear(num_inputs, hidden_dim)
self.a_fc2 = nn.Linear(hidden_dim, hidden_dim)
self.a_fc3 = nn.Linear(hidden_dim, num_actions)
self.a_fc3.weight.data.mul_(0.1)
self.a_fc3.bias.data.mul_(0.0)
self.c_fc1 = nn.Linear(num_inputs, hidden_dim)
self.c_fc2 = nn.Linear(hidden_dim, hidden_dim)
self.c_fc3 = nn.Linear(hidden_dim, 1)
self.c_fc3.weight.data.mul_(0.1)
self.c_fc3.bias.data.mul_(0.0)
def forward(self, x):
a = F.tanh(self.a_fc1(x))
a = F.tanh(self.a_fc2(a))
mean = self.a_fc3(a)
logstd = self.logstd.expand_as(mean)
std = torch.exp(logstd)
action = torch.normal(mean, std)
v = F.tanh(self.c_fc1(x))
v = F.tanh(self.c_fc2(v))
v = self.c_fc3(v)
return v, action, mean
def evaluate(self, x, action):
v, _, mean = self.forward(x)
logstd = self.logstd.expand_as(mean)
std = torch.exp(logstd)
logprob = log_normal_density(action, mean, logstd, std)
dist_entropy = 0.5 + 0.5 * math.log(2 * math.pi) + logstd
dist_entropy = dist_entropy.sum(-1).mean()
return v, logprob, dist_entropy
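# Note on the entropy term above: for a diagonal Gaussian with per-dimension scale
# sigma = exp(logstd), the differential entropy per dimension is
# 0.5 * log(2 * pi * e * sigma**2) = 0.5 + 0.5 * log(2 * pi) + logstd,
# which is exactly the expression summed over the action dimension in evaluate().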
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_dim': 4}]
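# A minimal shape check (illustrative only; _demo_actor_critic is not part of the original
# record): forward() returns the critic value, a sampled action, and the action mean for a
# [4, 4, 4, 4] observation batch.
def _demo_actor_critic():
    model = ActorCritic(num_inputs=4, num_actions=4, hidden_dim=4)
    v, action, mean = model(torch.rand(4, 4, 4, 4))
    assert v.shape == (4, 4, 4, 1)
    assert action.shape == mean.shape == (4, 4, 4, 4)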
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
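# The kernel above fuses the bias add of a Linear layer with tanh: in_out_ptr0 holds the
# 64x4 matmul result, updated in place, and in_ptr0 is the 4-element bias, broadcast across
# rows via x0 = xindex % 4.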
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x2, tmp1, xmask)
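# This kernel materialises std = exp(logstd) broadcast to the [4, 4, 4, 4] mean tensor:
# the 4-element logstd parameter is read via x0 = xindex % 4 and exponentiated for all
# 256 output elements, matching std = torch.exp(self.logstd.expand_as(mean)).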
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (1, 4), (4, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = torch.ops.aten.normal.Tensor_Tensor(reinterpret_tensor(buf4,
(4, 4, 4, 4), (64, 16, 4, 1), 0), buf5)
buf7 = buf6
del buf6
buf8 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0)
del buf5
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused_tanh_0[grid(256)](buf9, primals_10, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_10
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
triton_poi_fused_tanh_0[grid(256)](buf11, primals_12, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_12
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_13, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_14
return reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0
), buf7, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf9, buf11, primals_13, primals_11, primals_6, primals_4
def log_normal_density(x, mean, log_std, std):
"""returns guassian density given x on log scale"""
variance = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * variance) - 0.5 * np.log(2 * np.pi
) - log_std
log_density = log_density.sum(dim=1, keepdim=True)
return log_density
class ActorCriticNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(ActorCriticNew, self).__init__()
self.logstd = nn.Parameter(torch.zeros(num_actions))
self.a_fc1 = nn.Linear(num_inputs, hidden_dim)
self.a_fc2 = nn.Linear(hidden_dim, hidden_dim)
self.a_fc3 = nn.Linear(hidden_dim, num_actions)
self.a_fc3.weight.data.mul_(0.1)
self.a_fc3.bias.data.mul_(0.0)
self.c_fc1 = nn.Linear(num_inputs, hidden_dim)
self.c_fc2 = nn.Linear(hidden_dim, hidden_dim)
self.c_fc3 = nn.Linear(hidden_dim, 1)
self.c_fc3.weight.data.mul_(0.1)
self.c_fc3.bias.data.mul_(0.0)
def evaluate(self, x, action):
v, _, mean = self.forward(x)
logstd = self.logstd.expand_as(mean)
std = torch.exp(logstd)
logprob = log_normal_density(action, mean, logstd, std)
dist_entropy = 0.5 + 0.5 * math.log(2 * math.pi) + logstd
dist_entropy = dist_entropy.sum(-1).mean()
return v, logprob, dist_entropy
def forward(self, input_0):
primals_2 = self.logstd
primals_1 = self.a_fc1.weight
primals_5 = self.a_fc1.bias
primals_4 = self.a_fc2.weight
primals_7 = self.a_fc2.bias
primals_6 = self.a_fc3.weight
primals_8 = self.a_fc3.bias
primals_9 = self.c_fc1.weight
primals_10 = self.c_fc1.bias
primals_11 = self.c_fc2.weight
primals_12 = self.c_fc2.bias
primals_13 = self.c_fc3.weight
primals_14 = self.c_fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1], output[2]
| Tzenthin/pytorch-ppo-sac-HalfCheetah-v2 | ActorCritic | false | 5,927 | [
"MIT"
] | 1 | 282a4104ec577056a141909e29dc97ed425a566c | https://github.com/Tzenthin/pytorch-ppo-sac-HalfCheetah-v2/tree/282a4104ec577056a141909e29dc97ed425a566c | import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def log_normal_density(x, mean, log_std, std):
"""returns guassian density given x on log scale"""
variance = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * variance) - 0.5 * np.log(2 * np.pi
) - log_std
log_density = log_density.sum(dim=1, keepdim=True)
return log_density
class Model(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super().__init__()
self.logstd = nn.Parameter(torch.zeros(num_actions))
self.a_fc1 = nn.Linear(num_inputs, hidden_dim)
self.a_fc2 = nn.Linear(hidden_dim, hidden_dim)
self.a_fc3 = nn.Linear(hidden_dim, num_actions)
self.a_fc3.weight.data.mul_(0.1)
self.a_fc3.bias.data.mul_(0.0)
self.c_fc1 = nn.Linear(num_inputs, hidden_dim)
self.c_fc2 = nn.Linear(hidden_dim, hidden_dim)
self.c_fc3 = nn.Linear(hidden_dim, 1)
self.c_fc3.weight.data.mul_(0.1)
self.c_fc3.bias.data.mul_(0.0)
def forward(self, x):
a = F.tanh(self.a_fc1(x))
a = F.tanh(self.a_fc2(a))
mean = self.a_fc3(a)
logstd = self.logstd.expand_as(mean)
std = torch.exp(logstd)
action = torch.normal(mean, std)
v = F.tanh(self.c_fc1(x))
v = F.tanh(self.c_fc2(v))
v = self.c_fc3(v)
return v, action, mean
def evaluate(self, x, action):
v, _, mean = self.forward(x)
logstd = self.logstd.expand_as(mean)
std = torch.exp(logstd)
logprob = log_normal_density(action, mean, logstd, std)
dist_entropy = 0.5 + 0.5 * math.log(2 * math.pi) + logstd
dist_entropy = dist_entropy.sum(-1).mean()
return v, logprob, dist_entropy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
AttentionPool2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [0], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3y/c3yxwtlf7q4oo5ov7royd257tecdxmuzqqag2za7buohahsb7mfs.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add]
# Source node to ATen node mapping:
# x_1 => cat
# x_2 => add
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %permute],), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {})
triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 17, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uc/cucgrga44gtlwzw6ehoy4jrwzm5fghn3ljf7iugmdjhe6m7mjcas.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_6, %primals_7, %primals_8],), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((-8) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gq/cgqswernq7km7s5e6iqvidwhg6nuo5b5zp6m5py3pnbrdzepe236.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_10, 1.0), kwargs = {})
# %permute_17 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_13, [0, 2, 1]), kwargs = {})
triton_poi_fused_mul_transpose_3 = async_compile.triton('triton_poi_fused_mul_transpose_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = y0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1, 1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-8) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qz/cqzbvlh3tf752q5rj44eh5o54zbmctvajzgosqadhjdb5lbyqjy2.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_7, 1.0), kwargs = {})
# %permute_18 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_14, [0, 2, 1]), kwargs = {})
triton_poi_fused_mul_transpose_4 = async_compile.triton('triton_poi_fused_mul_transpose_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 32], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = 4 + y0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1, 1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q4/cq45jdoeuvgpxv4zofa7tafzriebamhh2wkhnkrmvo3v75s7gbkj.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, any_1, div, eq, exp, full_default, logical_not, logical_not_1, sub, sum_1, where
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_15, -inf), kwargs = {})
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq,), kwargs = {})
# %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not, -1, True), kwargs = {})
# %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_1,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 17, 17], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_1, %full_default, %div), kwargs = {})
triton_per_fused__safe_softmax_5 = async_compile.triton('triton_per_fused__safe_softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__safe_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 272
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 68
x3 = (xindex // 68)
tmp0 = tl.load(in_ptr0 + (r1 + (17*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float("-inf")
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = (tmp14 != 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + (17*x2) + (1184*x3)), tmp23, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hp/chpbwsfm77p7craq5ckxpabfykbcahcp5xaoqi5y3wp6bfgatgpn.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# multi_head_attention_forward => bmm_1
# Graph fragment:
# %bmm_1 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_16, %view_17), kwargs = {})
triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 289
x1 = (xindex // 289)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (289*(x1 % 4)) + (1184*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qb/cqb3dcbu2gzzir2rbde24vaunu7kucgqq3ftpydndkry6kdjyyqy.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 17
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (17*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (17, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add]
triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0)
del buf0
del primals_1
del primals_2
buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((12, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_6, primals_7, primals_8, buf4, 12, grid=grid(12), stream=stream0)
buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(buf4, (4, ), (1, ), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32)
buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
triton_poi_fused_mul_transpose_3.run(buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, grid=grid(16, 17), stream=stream0)
buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0); del buf2 # reuse
buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose]
triton_poi_fused_mul_transpose_4.run(buf3, primals_6, primals_7, primals_8, buf7, buf18, 16, 17, grid=grid(16, 17), stream=stream0)
del buf3
del primals_6
del primals_7
del primals_8
buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8)
buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax]
triton_per_fused__safe_softmax_5.run(buf8, buf12, 272, 17, grid=grid(272), stream=stream0)
buf13 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
triton_poi_fused_bmm_6.run(buf12, buf13, 4624, grid=grid(4624), stream=stream0)
buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14)
del buf13
buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf14, buf15, 17, 16, grid=grid(17, 16), stream=stream0)
buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
return (reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((17, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :]
x, _ = F.multi_head_attention_forward(query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1], num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.
weight, v_proj_weight=self.v_proj.weight, in_proj_weight=torch.
cat([self.q_proj.weight, self.k_proj.weight, self.v_proj.weight
]), in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias,
self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=
False, dropout_p=0.0, out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True,
training=self.training, need_weights=False)
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
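# A minimal shape check (illustrative only; _demo_attention_pool is not part of the original
# record): the (B, C, H, W) = (4, 4, 4, 4) input is flattened to 16 spatial tokens, a mean
# token is prepended, and only that token's attention output is returned, giving a (4, 4) tensor.
def _demo_attention_pool():
    pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads=4)
    out = pool(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4)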
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
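# The persistent reduction above computes the sum over the 16 spatial positions for
# x.mean(dim=0, keepdim=True); the division by 16 is folded into the following add/cat
# kernel (tmp7 = tmp5 / 16.0).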
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 17, tl.int64)
tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x0, tmp16, xmask)
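# triton_poi_fused_mul_transpose_3 and _4 (below): add the matching slice of the
# fused projection bias and apply the attention scaling constant (1.0 here, since
# head_dim == embed_dim // num_heads == 1), writing the result in the two layouts
# expected by the batched matmul.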
@triton.jit
def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy
='evict_last')
tmp1 = y0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]),
tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1, 1], 12, tl.int64)
tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]),
tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy
='evict_last')
tmp1 = 4 + y0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]),
tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1, 1], 12, tl.int64)
tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]),
tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask)
tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask)
@triton.jit
def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 272
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
x2 = xindex % 68
x3 = xindex // 68
tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask)
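# triton_poi_fused_bmm_6 (below): repacks the softmax output from its padded
# 1184-stride buffer into a contiguous (16, 17, 17) layout so the
# attention-weights @ values product can run as a plain batched matmul.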
@triton.jit
def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 289
x1 = xindex // 289
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 17
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (17, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
buf1, 272, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((12,), (1,), torch.float32)
triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8,
buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8),
reinterpret_tensor(buf1, (68, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta
=1, out=buf5)
del buf4
buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32)
buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32)
triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6,
primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=32, YBLOCK=8,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0)
del buf2
buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32)
triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6,
primals_7, primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=8,
num_warps=4, num_stages=1)
del buf3
del primals_6
del primals_7
del primals_8
buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8)
buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1),
torch.float32)
triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17,
XBLOCK=1, num_warps=2, num_stages=1)
buf13 = buf8
del buf8
triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=128,
num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0)
del buf7
extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1,
16, 0), 0), out=buf14)
del buf13
buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK
=16, YBLOCK=32, num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0)
del buf14
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(
buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4),
(4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1,
16), 0), buf17, buf18, primals_5, primals_4, primals_3
class AttentionPool2dNew(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, input_0):
primals_2 = self.positional_embedding
primals_3 = self.k_proj.weight
primals_6 = self.k_proj.bias
primals_4 = self.q_proj.weight
primals_7 = self.q_proj.bias
primals_5 = self.v_proj.weight
primals_8 = self.v_proj.bias
primals_9 = self.c_proj.weight
primals_10 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
 | Vaishaal/CLIP | AttentionPool2d | false | 5,928 | [ "MIT" ] | 1 | 16adcf2a5ff41d6a3f1bb45165aa348031fdbafe | https://github.com/Vaishaal/CLIP/tree/16adcf2a5ff41d6a3f1bb45165aa348031fdbafe | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads:
'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim **
2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :]
x, _ = F.multi_head_attention_forward(query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1], num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.
weight, v_proj_weight=self.v_proj.weight, in_proj_weight=torch.
cat([self.q_proj.weight, self.k_proj.weight, self.v_proj.weight
]), in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias,
self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=
False, dropout_p=0.0, out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True,
training=self.training, need_weights=False)
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
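# Illustrative usage sketch (not part of the original source); arguments and input
# shape mirror get_init_inputs()/get_inputs() above:
#   model = Model(4, 4, 4)                 # spacial_dim, embed_dim, num_heads
#   out = model(torch.rand(4, 4, 4, 4))    # -> tensor of shape (4, 4)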
|
AttnBahd | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/em/cemtk5jzpjp2in5ke7jredqemtdfwxti5iesvu5f6ak4earru6ed.py
# Topologically Sorted Source Nodes: [add, wquh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# wquh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %view_1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + (x4), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ul/culvxc5xcnacfjypzxghwcyc2445sqsz25ci4rib6axjxs3fv3so.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# attn_weights => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_4, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yr/cyr6fatjcqc5np3quy6arljtkkff4qjmueyb5b4pk5xvkxgrzuvd.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# attn_weights => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, wquh], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf2, buf1, primals_6, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
del primals_6
buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf5
return (buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, buf2, buf6, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn as nn
class AttnBahd(nn.Module):
def __init__(self, encoder_out_dim, decoder_hid_dim, attn_dim=None):
"""
Attention mechanism
:param encoder_out_dim: Dimension of hidden states of the encoder h_j
:param decoder_hid_dim: Dimension of the hidden states of the decoder s_{i-1}
:param attn_dim: Dimension of the internal state (default: same as decoder).
"""
super(AttnBahd, self).__init__()
self.h_dim = encoder_out_dim
self.s_dim = decoder_hid_dim
self.a_dim = self.s_dim if attn_dim is None else attn_dim
self.build()
def build(self):
self.U = nn.Linear(self.h_dim, self.a_dim)
self.W = nn.Linear(self.s_dim, self.a_dim)
self.v = nn.Linear(self.a_dim, 1)
self.tanh = nn.Tanh()
self.softmax = nn.LogSoftmax(dim=1)
def precmp_U(self, enc_outputs):
"""
Precompute U matrix for computational efficiency.
        The result is an SL x B x attn_dim tensor.
        :param enc_outputs: SL x B x encoder_out_dim tensor
:return: input projected by the weight matrix of the attention module.
"""
src_seq_len, batch_size, _enc_dim = enc_outputs.size()
enc_outputs_reshaped = enc_outputs.view(-1, self.h_dim)
proj = self.U(enc_outputs_reshaped)
proj_reshaped = proj.view(src_seq_len, batch_size, self.a_dim)
return proj_reshaped
def forward(self, prev_h_batch, enc_outputs):
"""
:param prev_h_batch: 1 x B x dec_dim
:param enc_outputs: SL x B x (num_directions * encoder_hidden_dim)
:return: attn weights: # B x SL
"""
src_seq_len, batch_size, _enc_dim = enc_outputs.size()
u_input = enc_outputs.view(-1, self.h_dim)
uh = self.U(u_input).view(src_seq_len, batch_size, self.a_dim)
wq = self.W(prev_h_batch.view(-1, self.s_dim)).unsqueeze(0)
wq3d = wq.expand_as(uh)
wquh = self.tanh(wq3d + uh)
attn_unnorm_scores = self.v(wquh.view(-1, self.a_dim)).view(batch_size,
src_seq_len)
attn_weights = self.softmax(attn_unnorm_scores)
return attn_weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_out_dim': 4, 'decoder_hid_dim': 4}]
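# Illustrative usage sketch (not part of the original source); shapes mirror
# get_inputs()/get_init_inputs() above:
#   attn = AttnBahd(encoder_out_dim=4, decoder_hid_dim=4)
#   w = attn(torch.rand(4, 4), torch.rand(4, 4, 4))  # -> (4, 4) log-attention weights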
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
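# triton_poi_fused_add_tanh_0 (below): fuses the bias additions of both linear
# projections with the broadcasted add and tanh, i.e. wquh = tanh(W(s) + U(h)),
# computed in place.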
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x4, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + x4, tmp7, xmask)
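# The two _log_softmax kernels below implement a numerically stable LogSoftmax
# over each row of 4 scores: the first subtracts the per-row maximum, the second
# subtracts log(sum(exp(.))).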
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 4),
(1, 4), 0), out=buf1)
del primals_5
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(64)](buf2, buf1, primals_6,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
del primals_6
buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
del buf4
triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), primals_4, buf2, buf6, primals_7
class AttnBahdNew(nn.Module):
def __init__(self, encoder_out_dim, decoder_hid_dim, attn_dim=None):
"""
Attention mechanism
:param encoder_out_dim: Dimension of hidden states of the encoder h_j
:param decoder_hid_dim: Dimension of the hidden states of the decoder s_{i-1}
:param attn_dim: Dimension of the internal state (default: same as decoder).
"""
super(AttnBahdNew, self).__init__()
self.h_dim = encoder_out_dim
self.s_dim = decoder_hid_dim
self.a_dim = self.s_dim if attn_dim is None else attn_dim
self.build()
def build(self):
self.U = nn.Linear(self.h_dim, self.a_dim)
self.W = nn.Linear(self.s_dim, self.a_dim)
self.v = nn.Linear(self.a_dim, 1)
self.tanh = nn.Tanh()
self.softmax = nn.LogSoftmax(dim=1)
def precmp_U(self, enc_outputs):
"""
Precompute U matrix for computational efficiency.
        The result is an SL x B x attn_dim tensor.
        :param enc_outputs: SL x B x encoder_out_dim tensor
:return: input projected by the weight matrix of the attention module.
"""
src_seq_len, batch_size, _enc_dim = enc_outputs.size()
enc_outputs_reshaped = enc_outputs.view(-1, self.h_dim)
proj = self.U(enc_outputs_reshaped)
proj_reshaped = proj.view(src_seq_len, batch_size, self.a_dim)
return proj_reshaped
def forward(self, input_0, input_1):
primals_2 = self.U.weight
primals_3 = self.U.bias
primals_4 = self.W.weight
primals_6 = self.W.bias
primals_7 = self.v.weight
primals_8 = self.v.bias
primals_5 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
 | UKPLab/acl2018-msr-workshop-binlin | AttnBahd | false | 5,929 | [ "Apache-2.0" ] | 1 | 9b8021dfa14a8bc131df117fa9985699fc8cedea | https://github.com/UKPLab/acl2018-msr-workshop-binlin/tree/9b8021dfa14a8bc131df117fa9985699fc8cedea | import torch
from torch import nn as nn
class Model(nn.Module):
def __init__(self, encoder_out_dim, decoder_hid_dim, attn_dim=None):
"""
Attention mechanism
:param encoder_out_dim: Dimension of hidden states of the encoder h_j
:param decoder_hid_dim: Dimension of the hidden states of the decoder s_{i-1}
:param attn_dim: Dimension of the internal state (default: same as decoder).
"""
super().__init__()
self.h_dim = encoder_out_dim
self.s_dim = decoder_hid_dim
self.a_dim = self.s_dim if attn_dim is None else attn_dim
self.build()
def build(self):
self.U = nn.Linear(self.h_dim, self.a_dim)
self.W = nn.Linear(self.s_dim, self.a_dim)
self.v = nn.Linear(self.a_dim, 1)
self.tanh = nn.Tanh()
self.softmax = nn.LogSoftmax(dim=1)
def precmp_U(self, enc_outputs):
"""
Precompute U matrix for computational efficiency.
        The result is an SL x B x attn_dim tensor.
        :param enc_outputs: SL x B x encoder_out_dim tensor
:return: input projected by the weight matrix of the attention module.
"""
src_seq_len, batch_size, _enc_dim = enc_outputs.size()
enc_outputs_reshaped = enc_outputs.view(-1, self.h_dim)
proj = self.U(enc_outputs_reshaped)
proj_reshaped = proj.view(src_seq_len, batch_size, self.a_dim)
return proj_reshaped
def forward(self, prev_h_batch, enc_outputs):
"""
:param prev_h_batch: 1 x B x dec_dim
:param enc_outputs: SL x B x (num_directions * encoder_hidden_dim)
:return: attn weights: # B x SL
"""
src_seq_len, batch_size, _enc_dim = enc_outputs.size()
u_input = enc_outputs.view(-1, self.h_dim)
uh = self.U(u_input).view(src_seq_len, batch_size, self.a_dim)
wq = self.W(prev_h_batch.view(-1, self.s_dim)).unsqueeze(0)
wq3d = wq.expand_as(uh)
wquh = self.tanh(wq3d + uh)
attn_unnorm_scores = self.v(wquh.view(-1, self.a_dim)).view(batch_size,
src_seq_len)
attn_weights = self.softmax(attn_unnorm_scores)
return attn_weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GSA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ap/capazs53vf7hafcoa3lzpsnuw7vvpg5foinomftp5aheilxucgrj.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, %addmm_4), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aq/caq6h3dan4vzhhsrm4lvbcqdagc4iozbtbt5fld7rswxmgjafkd4.py
# Topologically Sorted Source Nodes: [kq, k_2, q_2], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# k_2 => mul_1
# kq => sigmoid
# q_2 => mul_2
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_5,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_1, %sigmoid), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp4 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/r2/cr27m63n6a7wbqa5bchmoh4ayk7vptybueylj27efdfoaxonqpmx.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div_1, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = tmp7 / tmp10
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf1, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_9
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf2, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_11
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf5, reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_13
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kq, k_2, q_2], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf1, buf6, buf2, buf7, buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf7, reinterpret_tensor(buf8, (4, 4), (1, 4), 0), out=buf9)
buf12 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf9, buf12, 1, 16, grid=grid(1), stream=stream0)
buf13 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [f], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf0, out=buf13)
return (buf13, primals_1, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf12, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), buf8, primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class GSAHelper(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.fc_kq = nn.Linear(self.d, self.d)
def forward(self, k, q):
m = k.shape[0]
k_1 = self.fc_k(k)
q_1 = self.fc_q(q)
kq = nn.Sigmoid()(self.fc_kq(torch.mul(k_1, q_1)))
k_2 = torch.mul(k, kq)
q_2 = torch.mul(q, kq)
mul = torch.mm(k_2, torch.t(q_2)) / self.d ** (1.0 / 2)
a = nn.Softmax()(torch.flatten(mul)).view(m, m)
return a
class GSA(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_v = nn.Linear(self.d, self.d)
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.gsa_helper = GSAHelper(self.d)
def forward(self, x):
x.shape[0]
v = self.fc_v(x)
k = self.fc_k(x)
q = self.fc_q(x)
a = self.gsa_helper(k, q)
f = torch.mm(a, v)
return f
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d': 4}]
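# Illustrative usage sketch (not part of the original source); shapes mirror
# get_inputs()/get_init_inputs() above:
#   gsa = GSA(d=4)
#   f = gsa(torch.rand(4, 4))  # -> (4, 4) attention-weighted values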
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
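# triton_poi_fused_mul_0 (below): elementwise product of the two projected
# inputs, i.e. torch.mul(k_1, q_1) in GSAHelper.forward.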
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp4 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
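# triton_per_fused__softmax_2 (below): softmax over the flattened 4x4 score
# matrix; the 0.5 factor is the 1/sqrt(d) scaling with d = 4.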
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = tmp7 / tmp10
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp11, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf1, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_9
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf2, reinterpret_tensor(
primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_11
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, buf5, reinterpret_tensor(
primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_13
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(16)](buf1, buf6, buf2, buf7,
buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(buf8, (4, 4), (1, 4), 0),
out=buf9)
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
triton_per_fused__softmax_2[grid(1)](buf9, buf12, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf13 = buf9
del buf9
extern_kernels.mm(reinterpret_tensor(buf12, (4, 4), (4, 1), 0),
buf0, out=buf13)
return (buf13, primals_1, buf1, buf2, buf3, buf4, buf5, buf6, buf7,
buf12, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), buf8,
primals_12, primals_10, primals_8)
class GSAHelper(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.fc_kq = nn.Linear(self.d, self.d)
def forward(self, k, q):
m = k.shape[0]
k_1 = self.fc_k(k)
q_1 = self.fc_q(q)
kq = nn.Sigmoid()(self.fc_kq(torch.mul(k_1, q_1)))
k_2 = torch.mul(k, kq)
q_2 = torch.mul(q, kq)
mul = torch.mm(k_2, torch.t(q_2)) / self.d ** (1.0 / 2)
a = nn.Softmax()(torch.flatten(mul)).view(m, m)
return a
class GSANew(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_v = nn.Linear(self.d, self.d)
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.gsa_helper = GSAHelper(self.d)
def forward(self, input_0):
primals_1 = self.fc_v.weight
primals_3 = self.fc_v.bias
primals_2 = self.fc_k.weight
primals_5 = self.fc_k.bias
primals_4 = self.fc_q.weight
primals_7 = self.fc_q.bias
primals_6 = self.gsa_helper.fc_k.weight
primals_9 = self.gsa_helper.fc_k.bias
primals_8 = self.gsa_helper.fc_q.weight
primals_11 = self.gsa_helper.fc_q.bias
primals_10 = self.gsa_helper.fc_kq.weight
primals_13 = self.gsa_helper.fc_kq.bias
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
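# Hedged smoke test (added for illustration; not part of the generated module): a shape-level
# check of the compiled wrapper. The call() above only handles 4x4 float32 CUDA tensors, and
# the helper name below is an assumption, not repo code. No numerical-equivalence claim is made.
def _gsa_new_shape_smoke_test():
    model = GSANew(4).cuda()
    x = torch.rand(4, 4, device='cuda')
    f = model(x)
    assert f.shape == (4, 4)
    return f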
| VKCOM/TopicsDataset | GSA | false | 5,930 | [
"MIT"
] | 1 | 149919321ba61a8f17b22f62f60f4aedec43d72b | https://github.com/VKCOM/TopicsDataset/tree/149919321ba61a8f17b22f62f60f4aedec43d72b | import torch
from torch import nn
class GSAHelper(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.fc_kq = nn.Linear(self.d, self.d)
def forward(self, k, q):
m = k.shape[0]
k_1 = self.fc_k(k)
q_1 = self.fc_q(q)
kq = nn.Sigmoid()(self.fc_kq(torch.mul(k_1, q_1)))
k_2 = torch.mul(k, kq)
q_2 = torch.mul(q, kq)
mul = torch.mm(k_2, torch.t(q_2)) / self.d ** (1.0 / 2)
a = nn.Softmax()(torch.flatten(mul)).view(m, m)
return a
class Model(nn.Module):
def __init__(self, d):
super().__init__()
self.d = d
self.fc_v = nn.Linear(self.d, self.d)
self.fc_k = nn.Linear(self.d, self.d)
self.fc_q = nn.Linear(self.d, self.d)
self.gsa_helper = GSAHelper(self.d)
def forward(self, x):
        x.shape[0]  # no-op: the batch size is read but not used
v = self.fc_v(x)
k = self.fc_k(x)
q = self.fc_q(x)
a = self.gsa_helper(k, q)
f = torch.mm(a, v)
return f
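# Hedged property check (added for illustration, not repo code): GSAHelper applies the softmax
# over the flattened m*m score matrix, so the attention weights sum to 1 across the whole
# matrix rather than per row. The helper name below is an assumption.
def _gsa_attention_sums_to_one(d=4, m=4):
    helper = GSAHelper(d)
    a = helper(torch.rand(m, d), torch.rand(m, d))
    assert torch.allclose(a.sum(), torch.tensor(1.0), atol=1e-5)
    return a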
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
CpuSpeedModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kp/ckpcktckccng2feaydijkqxegeqaibgbp2zftaad7t25xeil4wu7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_1 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_5 => sigmoid_2
# Graph fragment:
# %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
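# Hedged reference (added commentary; Inductor did not emit this): both fused kernels above
# compute the same elementwise update, out = sigmoid(out + bias), and differ only in problem
# size (6400 vs. 256 elements) and bias period (100 vs. 4). The function name is illustrative.
def _eager_fused_sigmoid_reference(out, bias):
    # `out` holds the matmul result in-place in the generated code; here a new tensor is returned.
    return torch.sigmoid(out + bias)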
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 100), (100, 1))
assert_size_stride(primals_5, (100, ), (1, ))
assert_size_stride(primals_6, (4, 100), (100, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 6400, grid=grid(6400), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_0.run(buf3, primals_5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((100, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CpuSpeedModel(nn.Module):
def __init__(self, input_size, output_size):
super(CpuSpeedModel, self).__init__()
hidden_size = 100
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.linear1(x)
x = torch.sigmoid(x)
x = self.linear2(x)
x = torch.sigmoid(x)
x = self.linear3(x)
x = torch.sigmoid(x)
return x
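# Hedged smoke test (illustrative only, not repo code): the final sigmoid keeps every output
# entry strictly inside (0, 1); the shapes below mirror get_inputs()/get_init_inputs().
def _cpu_speed_model_smoke_test():
    model = CpuSpeedModel(4, 4)
    y = model(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 4, 4)
    assert bool(((y > 0) & (y < 1)).all())
    return y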
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 100), (100, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (4, 100), (100, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(6400)](buf1, primals_2, 6400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf2
triton_poi_fused_sigmoid_0[grid(6400)](buf3, primals_5, 6400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_sigmoid_1[grid(256)](buf5, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, primals_6, primals_4
class CpuSpeedModelNew(nn.Module):
def __init__(self, input_size, output_size):
super(CpuSpeedModelNew, self).__init__()
hidden_size = 100
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| VVKot/mlinsecond-general-cpu | CpuSpeedModel | false | 5,931 | [
"MIT"
] | 1 | d3e08027dc3152b5c88c2e5bf4b365eedbdcb0d1 | https://github.com/VVKot/mlinsecond-general-cpu/tree/d3e08027dc3152b5c88c2e5bf4b365eedbdcb0d1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
hidden_size = 100
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.linear1(x)
x = torch.sigmoid(x)
x = self.linear2(x)
x = torch.sigmoid(x)
x = self.linear3(x)
x = torch.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SinkhornKnopp | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3g/c3gd5ipjfbufzu6hvujxufg6z3emufd62cvcqjyy3muqew2xvzbd.py
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pb/cpb5rsib7fg2llnv7dg2enzvdhrqqrlufmxvryadr6zckhtmyysm.py
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_2, [1], True), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ay/cayipr5tzejzfvzl7ba3w7cqhf2axor2ag2cqam22ambv3zsqfqn.py
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_3 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_6, [0], True), kwargs = {})
triton_poi_fused_sum_2 = async_compile.triton('triton_poi_fused_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gf/cgfazley3l5rdd6ypj2skdwbxoltt6cdo3d3mpt4z3kqywdlsil5.py
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_1 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_10, [1], True), kwargs = {})
triton_poi_fused_sum_3 = async_compile.triton('triton_poi_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/im/cimyq4ncn2lt7wfmnkrejd4t7hctagm26a6wsjpob7y2e6gdebuy.py
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_7 => div_7
# Graph fragment:
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_12, 4), kwargs = {})
triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/f4/cf4q6mh4hv4abtcbirw4vkce7au36jgqitbjdciddckwumhdqyiq.py
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_9 => div_9
# Graph fragment:
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_16, 4), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zl/czlhaiakbynkvvwkmvsahwmxzw7xdccr5ho7bxsxvn4m725ewzpe.py
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_11 => div_11
# Graph fragment:
# %div_11 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_20, 4), kwargs = {})
triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/36/c36xk4rs42yzbwetmlob65txyg64pwcsheobuwb6xw3pwyopqghz.py
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# Q_14 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_26, 4), kwargs = {})
triton_poi_fused_mul_7 = async_compile.triton('triton_poi_fused_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + (4*y0)), tmp12, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dt/cdt2v5rbgu26tdvjkb5jbjk3tqziqwvnlr3boatkbhu2j3hsthua.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %permute_31 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%permute_30, [1, 0]), kwargs = {})
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 1, 16, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
triton_poi_fused_sum_1.run(arg0_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
triton_poi_fused_sum_2.run(arg0_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
triton_poi_fused_sum_3.run(arg0_1, buf0, buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
triton_poi_fused_div_4.run(arg0_1, buf0, buf1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf6, buf7, 4, 4, grid=grid(4, 4), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
del buf7
return (buf8, )
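# Hedged eager reference (added for clarity; not part of the generated module): the fused
# kernels above unroll three Sinkhorn-Knopp iterations with epsilon = 0.05 (the 20.0 constant
# is 1/epsilon) and K = B = 4 (the 0.25 constant). A plain-PyTorch sketch of the same
# computation under those assumptions; the function name is illustrative.
def _eager_sinkhorn_reference(x, num_iters=3, epsilon=0.05):
    Q = torch.exp(x / epsilon).t()
    Q = Q / Q.sum()
    K, B = Q.shape
    for _ in range(num_iters):
        Q = Q / Q.sum(dim=1, keepdim=True) / K
        Q = Q / Q.sum(dim=0, keepdim=True) / B
    return (Q * B).t()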
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed as dist
class SinkhornKnopp(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
        A simple iterative method to approximate the doubly stochastic matrix is to alternately
        rescale the rows and columns of the matrix so that they sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
@torch.no_grad()
def forward(self, Q: 'torch.Tensor') ->torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
Applies the entropy regularization, normalizes the Q matrix and then normalizes rows and
columns in an alternating fashion for num_iter times. Before returning it normalizes again
the columns in order for the output to be an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0]
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
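# Hedged property check (illustrative only, not repo code): after the final column
# normalization and the multiplication by B, each row of the returned (B, K) assignment
# should sum to roughly 1. The helper name is an assumption.
def _sinkhorn_rows_sum_to_one(atol=1e-05):
    q = SinkhornKnopp()(torch.rand(4, 4))
    assert torch.allclose(q.sum(dim=1), torch.ones(q.shape[0]), atol=atol)
    return q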
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused_mul_7[grid(4, 4)](buf6, buf7, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_8[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4,
num_warps=1, num_stages=1)
del buf7
return buf8,
class SinkhornKnoppNew(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
A simple iterative method to approach the double stochastic matrix is to alternately rescale
rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| TranNhiem/MVAR_SSL | SinkhornKnopp | false | 5,932 | [
"MIT"
] | 1 | 339964db4d40f06a92866675ff99ef67cd968cca | https://github.com/TranNhiem/MVAR_SSL/tree/339964db4d40f06a92866675ff99ef67cd968cca | import torch
import torch.distributed as dist
class Model(torch.nn.Module):
def __init__(self, num_iters: 'int'=3, epsilon: 'float'=0.05,
world_size: 'int'=1):
"""Approximates optimal transport using the Sinkhorn-Knopp algorithm.
A simple iterative method to approach the double stochastic matrix is to alternately rescale
rows and columns of the matrix to sum to 1.
Args:
num_iters (int, optional): number of times to perform row and column normalization.
Defaults to 3.
epsilon (float, optional): weight for the entropy regularization term. Defaults to 0.05.
world_size (int, optional): number of nodes for distributed training. Defaults to 1.
"""
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
self.world_size = world_size
@torch.no_grad()
def forward(self, Q: 'torch.Tensor') ->torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
Applies the entropy regularization, normalizes the Q matrix and then normalizes rows and
columns in an alternating fashion for num_iter times. Before returning it normalizes again
the columns in order for the output to be an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0]
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
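For reference, a minimal single-process sketch of the alternating row/column rescaling that the SinkhornKnopp forward pass above performs (no distributed all-reduce; the function name and the final check are illustrative, not part of the entry):

import torch

def sinkhorn_knopp_sketch(logits, epsilon=0.05, num_iters=3):
    # Entropy-regularised scores, transposed to (prototypes, samples).
    Q = torch.exp(logits / epsilon).t()
    K, B = Q.shape  # world_size assumed to be 1
    Q /= Q.sum()
    for _ in range(num_iters):
        Q /= Q.sum(dim=1, keepdim=True)  # rows: each prototype gets 1/K of the mass
        Q /= K
        Q /= Q.sum(dim=0, keepdim=True)  # columns: each sample gets 1/B of the mass
        Q /= B
    Q *= B  # columns now sum to 1, i.e. one assignment per sample
    return Q.t()

assignments = sinkhorn_knopp_sketch(torch.rand(4, 4))
print(assignments.sum(dim=1))  # each row sums to ~1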
WeightNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fa/cfa6itnokv74hv7k5ssvcinm66irvjdzfky2hejxqzrga4jb3fq4.py
# Topologically Sorted Source Nodes: [x, sigmoid, x_3], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# x => convolution
# x_3 => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%permute,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 2), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, sigmoid, x_3], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf2, primals_1, primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WeightNet(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.conv.bias.data[...] = 0
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, self.groups, t)
x = x.permute(0, 2, 1)
x = 2 * self.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(16)](buf1,
primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_1, primals_2, buf1
class WeightNetNew(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.conv.bias.data[...] = 0
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION | WeightNet | false | 5,933 | [
"MIT"
] | 1 | 6f4d1c7e6883d6b0664fcd04265f437247afab54 | https://github.com/VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION/tree/6f4d1c7e6883d6b0664fcd04265f437247afab54 | import torch
import torch.nn as nn
class Model(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.conv.bias.data[...] = 0
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, self.groups, t)
x = x.permute(0, 2, 1)
x = 2 * self.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 1]
|
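A short eager-mode usage sketch for the WeightNet entry above, assuming the (n, c, t) layout from get_inputs(); with the zero-initialised conv bias the 2 * sigmoid rescaling starts near 1.0 and always lies in (0, 2):

import torch
import torch.nn as nn

n, c, t, groups = 4, 4, 4, 1
conv = nn.Conv1d(c, groups, kernel_size=3, padding=1)
conv.bias.data.zero_()  # same initialisation as WeightNet.init_weights

x = torch.rand(n, c, t)  # (batch, channels, time)
w = 2 * torch.sigmoid(conv(x).view(n, groups, t).permute(0, 2, 1))

print(w.shape)  # torch.Size([4, 4, 1]) -> (n, t, groups)
print(float(w.min()), float(w.max()))  # values lie strictly inside (0, 2)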
ResidualSequential | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ng/cnggtmai2hzxc7e5creviqseyyf7qiy5pfpdjlp2pomqsserjuzj.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim
import torch.nn as nn
import torch.nn.init
class ResidualSequential(nn.Sequential):
def __init__(self, *args):
super(ResidualSequential, self).__init__(*args)
def forward(self, x):
out = super(ResidualSequential, self).forward(x)
x_ = None
if out.size(2) != x.size(2) or out.size(3) != x.size(3):
diff2 = x.size(2) - out.size(2)
diff3 = x.size(3) - out.size(3)
            x_ = x[:, :, diff2 // 2:out.size(2) + diff2 // 2, diff3 // 2:
                out.size(3) + diff3 // 2]
else:
x_ = x
return out + x_
def eval(self):
None
for m in self.modules():
m.eval()
exit()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.optim
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ResidualSequentialNew(nn.Sequential):
def __init__(self, *args):
super(ResidualSequentialNew, self).__init__(*args)
def eval(self):
None
for m in self.modules():
m.eval()
exit()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Volodimirich/DL-in-denoising-MCT-rock-images | ResidualSequential | false | 5,934 | [
"MIT"
] | 1 | 0201d42a45221e4e0faaf50c59bf48c435bcdc82 | https://github.com/Volodimirich/DL-in-denoising-MCT-rock-images/tree/0201d42a45221e4e0faaf50c59bf48c435bcdc82 | import torch
import torch.optim
import torch.nn as nn
import torch.nn.init
class Model(nn.Sequential):
def __init__(self, *args):
super().__init__(*args)
def forward(self, x):
        out = super().forward(x)
x_ = None
if out.size(2) != x.size(2) or out.size(3) != x.size(3):
diff2 = x.size(2) - out.size(2)
diff3 = x.size(3) - out.size(3)
            x_ = x[:, :, diff2 // 2:out.size(2) + diff2 // 2, diff3 // 2:
                out.size(3) + diff3 // 2]
else:
x_ = x
return out + x_
def eval(self):
None
for m in self.modules():
m.eval()
exit()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
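An illustrative residual wrapper in the spirit of the entry above, using integer division for the centre-crop offsets that the slicing requires under Python 3; the class name and the Conv2d/ReLU body are placeholders rather than code from the original repository:

import torch
import torch.nn as nn

class ResidualSeq(nn.Sequential):
    """Adds the (optionally centre-cropped) input back onto the Sequential output."""

    def forward(self, x):
        out = super().forward(x)
        if out.size(2) != x.size(2) or out.size(3) != x.size(3):
            dh = (x.size(2) - out.size(2)) // 2
            dw = (x.size(3) - out.size(3)) // 2
            x = x[:, :, dh:dh + out.size(2), dw:dw + out.size(3)]
        return out + x

block = ResidualSeq(nn.Conv2d(4, 4, 3, padding=1), nn.ReLU())
print(block(torch.rand(4, 4, 8, 8)).shape)  # torch.Size([4, 4, 8, 8])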
TorchModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ku/ckukyw44hxxcrcpyqqe6auljaf54daimtcs6kbykg5nkqzpxqi7c.py
# Topologically Sorted Source Nodes: [tanh_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh_2 => tanh_2
# Graph fragment:
# %tanh_2 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (1, 4, 4, 4, 64), (4096, 1024, 256, 64, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (1, 4, 4, 4, 64), (4096, 1024, 256, 64, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf4, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class TorchLinearModule(torch.nn.Module):
def __init__(self, in_size, out_size):
super(TorchLinearModule, self).__init__()
self._linear = torch.nn.Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class TorchModule(torch.nn.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
super(TorchModule, self).__init__()
self._linear0 = TorchLinearModule(in_size, hidden_size)
self._linear1 = TorchLinearModule(hidden_size, hidden_size)
self._linear2 = TorchLinearModule(hidden_size, out_size)
def forward(self, x):
x = x.unsqueeze(0)
x = torch.tanh(self._linear0(x))
x = torch.tanh(self._linear1(x))
return torch.tanh(self._linear2(x))[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'out_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (1, 4, 4, 4, 64), (4096, 1024, 256,
64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_3, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (1, 4, 4, 4, 64), (4096, 1024, 256,
64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, buf3, buf4, primals_6, primals_4
class TorchLinearModule(torch.nn.Module):
def __init__(self, in_size, out_size):
super(TorchLinearModule, self).__init__()
self._linear = torch.nn.Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class TorchModuleNew(torch.nn.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
super(TorchModuleNew, self).__init__()
self._linear0 = TorchLinearModule(in_size, hidden_size)
self._linear1 = TorchLinearModule(hidden_size, hidden_size)
self._linear2 = TorchLinearModule(hidden_size, out_size)
def forward(self, input_0):
primals_2 = self._linear0._linear.weight
primals_3 = self._linear0._linear.bias
primals_4 = self._linear1._linear.weight
primals_5 = self._linear1._linear.bias
primals_6 = self._linear2._linear.weight
primals_7 = self._linear2._linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| VedPatwardhan/ivy | TorchModule | false | 5,935 | [
"Apache-2.0"
] | 1 | 7b2105fa8cf38879444a1029bfaa7f0b2f27717a | https://github.com/VedPatwardhan/ivy/tree/7b2105fa8cf38879444a1029bfaa7f0b2f27717a | import torch
import torch.nn
class TorchLinearModule(torch.nn.Module):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = torch.nn.Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class Model(torch.nn.Module):
def __init__(self, in_size, out_size, device=None, hidden_size=64):
super().__init__()
self._linear0 = TorchLinearModule(in_size, hidden_size)
self._linear1 = TorchLinearModule(hidden_size, hidden_size)
self._linear2 = TorchLinearModule(hidden_size, out_size)
def forward(self, x):
x = x.unsqueeze(0)
x = torch.tanh(self._linear0(x))
x = torch.tanh(self._linear1(x))
return torch.tanh(self._linear2(x))[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
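For orientation, a small eager-mode check of what the generated call() above does per linear layer: the mm/addmm on the flattened (64, in_features) view matches nn.Linear applied to the 4-D input up to floating-point tolerance. Shapes mirror get_inputs():

import torch
import torch.nn as nn

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4)
linear = nn.Linear(4, 64)

# What the compiled wrapper does: flatten to (64, 4), multiply by W^T, add the bias.
flat = x.reshape(-1, 4)
manual = torch.tanh(torch.addmm(linear.bias, flat, linear.weight.t()))

# Eager reference: nn.Linear broadcasts over the leading dimensions.
reference = torch.tanh(linear(x)).reshape(-1, 64)

print(torch.allclose(manual, reference, atol=1e-6))  # True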
TVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/m3/cm36x6anfnnqsryyawwl54kzwksxpwfrpbhzarigqywk7tkxqc4t.py
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv, truediv, sub_1, pow_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# h_tv => sum_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# w_tv => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 48), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 48), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 8), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {})
triton_per_fused_add_div_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex % 3
r3 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 8.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv, truediv, sub_1, pow_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.utils.data
import torch.utils.data.dataset
import torch
import torch.nn as nn
import torch.utils.data.distributed
class TVLoss(nn.Module):
"""Regularization loss based on Li FeiFei."""
def __init__(self, weight: 'Tensor') ->None:
"""The weight information of loss is based on the image information generated by the generator.
Args:
weight (tensor): Fake high resolution image weight.
"""
super(TVLoss, self).__init__()
self.weight = weight
def forward(self, input: 'Tensor') ->Tensor:
batch_size = input.size()[0]
h_x = input.size()[2]
w_x = input.size()[3]
count_h = self.tensor_size(input[:, :, 1:, :])
count_w = self.tensor_size(input[:, :, :, 1:])
h_tv = torch.pow(input[:, :, 1:, :] - input[:, :, :h_x - 1, :], 2).sum(
)
w_tv = torch.pow(input[:, :, :, 1:] - input[:, :, :, :w_x - 1], 2).sum(
)
return self.weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weight': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
import torch.utils.data
import torch.utils.data.dataset
import torch
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 8.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
"""Regularization loss based on Li FeiFei."""
def __init__(self, weight: 'Tensor') ->None:
"""The weight information of loss is based on the image information generated by the generator.
Args:
weight (tensor): Fake high resolution image weight.
"""
super(TVLossNew, self).__init__()
self.weight = weight
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Tubbz-alt/SRGAN-PyTorch-2 | TVLoss | false | 5,936 | [
"Apache-2.0"
] | 1 | c1a01c99287a6212a3dc76ac17baafcf1c9f3013 | https://github.com/Tubbz-alt/SRGAN-PyTorch-2/tree/c1a01c99287a6212a3dc76ac17baafcf1c9f3013 | import torch
from torch import Tensor
import torch.utils.data
import torch.utils.data.dataset
import torch
import torch.nn as nn
import torch.utils.data.distributed
class Model(nn.Module):
"""Regularization loss based on Li FeiFei."""
def __init__(self, weight: 'Tensor') ->None:
"""The weight information of loss is based on the image information generated by the generator.
Args:
weight (tensor): Fake high resolution image weight.
"""
super().__init__()
self.weight = weight
def forward(self, input: 'Tensor') ->Tensor:
batch_size = input.size()[0]
h_x = input.size()[2]
w_x = input.size()[3]
count_h = self.tensor_size(input[:, :, 1:, :])
count_w = self.tensor_size(input[:, :, :, 1:])
h_tv = torch.pow(input[:, :, 1:, :] - input[:, :, :h_x - 1, :], 2).sum(
)
w_tv = torch.pow(input[:, :, :, 1:] - input[:, :, :, :w_x - 1], 2).sum(
)
return self.weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
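A compact standalone sketch of the total-variation term computed above; the function name is illustrative, and count_h / count_w reproduce the tensor_size normalisation (C * (H - 1) * W and C * H * (W - 1), i.e. the constant 48 in the fused kernel):

import torch

def tv_loss(x, weight=4.0):
    n, c, h, w = x.shape
    h_tv = (x[:, :, 1:, :] - x[:, :, :-1, :]).pow(2).sum()
    w_tv = (x[:, :, :, 1:] - x[:, :, :, :-1]).pow(2).sum()
    count_h = c * (h - 1) * w
    count_w = c * h * (w - 1)
    return weight * 2 * (h_tv / count_h + w_tv / count_w) / n

print(tv_loss(torch.rand(4, 4, 4, 4)))  # scalar tensor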
OffsetNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3u6zmp5plplv5tjyzlxet5sgcucpeizysbhi7xphxjhdc6kmodq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qs/cqsk6a76egdyzr4pbwvfkopx7r76mm6pjq7rxndgddfyegzfpbgy.py
# Topologically Sorted Source Nodes: [sigmoid, sub, x_5], Original ATen: [aten.sigmoid, aten.sub, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# sub => sub
# x_5 => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {})
triton_poi_fused_mul_sigmoid_sub_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.5
tmp3 = tmp1 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, sub, x_5], Original ATen: [aten.sigmoid, aten.sub, aten.mul]
triton_poi_fused_mul_sigmoid_sub_2.run(buf5, buf6, 4, grid=grid(4), stream=stream0)
return (buf6, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf3, buf5, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class OffsetNet(nn.Module):
"""OffsetNet in Temporal interlace module.
    The OffsetNet consists of one convolution layer and two fc layers
    with a ReLU activation in between, followed by a sigmoid function.
    The convolution layer is applied first, then the two fc layers with
    the ReLU. Finally, the sigmoid output is shifted by -0.5 and scaled
    by a factor of 4, mapping the result into the range (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, t)
x = self.relu(self.fc1(x))
x = self.fc2(x)
x = x.view(n, 1, -1)
x = 4 * (self.sigmoid(x) - 0.5)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1, 'num_segments': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.5
tmp3 = tmp1 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_mul_sigmoid_sub_2[grid(4)](buf5, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return buf6, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), buf3, buf5, primals_6, primals_4
class OffsetNetNew(nn.Module):
"""OffsetNet in Temporal interlace module.
    The OffsetNet consists of one convolution layer and two fc layers
    with a ReLU activation in between, followed by a sigmoid function.
    The convolution layer is applied first, then the two fc layers with
    the ReLU. Finally, the sigmoid output is shifted by -0.5 and scaled
    by a factor of 4, mapping the result into the range (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
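# Note: OffsetNetNew is the inductor-compiled counterpart of the eager OffsetNet above;
# its forward() hands the module parameters plus the input tensor to call() and keeps
# only the first returned buffer (the (n, 1, groups) offset tensor), so with identical
# weights the two modules agree up to floating-point differences.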
| VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION | OffsetNet | false | 5,937 | [
"MIT"
] | 1 | 6f4d1c7e6883d6b0664fcd04265f437247afab54 | https://github.com/VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION/tree/6f4d1c7e6883d6b0664fcd04265f437247afab54 | import torch
import torch.nn as nn
class Model(nn.Module):
"""OffsetNet in Temporal interlace module.
    The OffsetNet consists of one convolution layer and two fc layers
    with a ReLU activation in between, followed by a sigmoid function.
    The convolution layer is applied first, then the two fc layers with
    the ReLU. Finally, the sigmoid output is shifted by -0.5 and scaled
    by a factor of 4, mapping the result into the range (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, t)
x = self.relu(self.fc1(x))
x = self.fc2(x)
x = x.view(n, 1, -1)
x = 4 * (self.sigmoid(x) - 0.5)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 1, 4]
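# Usage sketch (illustrative): build the model with the init arguments above and run it
# on the sample input; every returned offset lies in (-2, 2).
if __name__ == "__main__":
    model = Model(*get_init_inputs())
    offsets = model(*get_inputs())
    print(offsets.shape)  # torch.Size([4, 1, 1])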
|
CombinedPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3l/c3lehpaeu67ebeqx6utcdy2uk3rpahkhhcadgbpm6wcroyb6qsjs.py
# Topologically Sorted Source Nodes: [max_pooled, avg_pooled, add], Original ATen: [aten.adaptive_max_pool2d, aten.mean, aten.add]
# Source node to ATen node mapping:
# add => add
# avg_pooled => mean
# max_pooled => adaptive_max_pool2d
# Graph fragment:
# %adaptive_max_pool2d : [num_users=1] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%arg0_1, [1, 1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %mean), kwargs = {})
triton_per_fused_adaptive_max_pool2d_add_mean_0 = async_compile.triton('triton_per_fused_adaptive_max_pool2d_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_adaptive_max_pool2d_add_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp21 = triton_helpers.maximum(tmp20, tmp19)
tmp23 = triton_helpers.maximum(tmp22, tmp21)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp35 = triton_helpers.maximum(tmp34, tmp33)
tmp36 = 16.0
tmp37 = tmp4 / tmp36
tmp38 = tmp35 + tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
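# For reference, the persistent-reduction kernel above fuses the 1x1 adaptive max pool,
# the 1x1 adaptive average pool, and their sum into one pass over the 16 spatial
# elements of each (batch, channel) slice. An eager-mode sketch of the same reduction
# (illustrative helper, assumes the module-level torch import of this file):
def _reference_combined_pooling(x):
    # x: (N, C, H, W); the kernel above is specialised for H * W == 16.
    return x.amax(dim=(-2, -1), keepdim=True) + x.mean(dim=(-2, -1), keepdim=True)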
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [max_pooled, avg_pooled, add], Original ATen: [aten.adaptive_max_pool2d, aten.mean, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_adaptive_max_pool2d_add_mean_0.run(buf2, arg0_1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data.distributed
class CombinedPooling(nn.Module):
def __init__(self):
super().__init__()
self.max_pooling = nn.AdaptiveMaxPool2d(1)
self.avg_pooling = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
max_pooled = self.max_pooling(x)
avg_pooled = self.avg_pooling(x)
return max_pooled + avg_pooled
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
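# Usage sketch (illustrative): the module collapses every (H, W) plane to a single value
# equal to the sum of its maximum and its mean.
if __name__ == "__main__":
    pool = CombinedPooling()
    pooled = pool(torch.rand(4, 4, 4, 4))
    print(pooled.shape)  # torch.Size([4, 4, 1, 1])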
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.optim
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp21 = triton_helpers.maximum(tmp20, tmp19)
tmp23 = triton_helpers.maximum(tmp22, tmp21)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp35 = triton_helpers.maximum(tmp34, tmp33)
tmp36 = 16.0
tmp37 = tmp4 / tmp36
tmp38 = tmp35 + tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp38, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_adaptive_max_pool2d_add_mean_0[grid(16)](buf2,
arg0_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class CombinedPoolingNew(nn.Module):
def __init__(self):
super().__init__()
self.max_pooling = nn.AdaptiveMaxPool2d(1)
self.avg_pooling = nn.AdaptiveAvgPool2d(1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| VisualComputingInstitute/CROWDBOT_perception | CombinedPooling | false | 5,938 | [
"MIT"
] | 1 | df98f3f658c39fb3fa4ac0456f1214f7918009f6 | https://github.com/VisualComputingInstitute/CROWDBOT_perception/tree/df98f3f658c39fb3fa4ac0456f1214f7918009f6 | import torch
import torch.optim
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
self.max_pooling = nn.AdaptiveMaxPool2d(1)
self.avg_pooling = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
max_pooled = self.max_pooling(x)
avg_pooled = self.avg_pooling(x)
return max_pooled + avg_pooled
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SEModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tu/ctuej2j6f3oxr5p43q7juhagc3r3ncgs2ikvxemutunlnxlnvl24.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2, -3], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/go/cgo7ojqi6f3flplnruvl4wc24c2i4immxpu4li4bvsqvh4bgegyj.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
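# Note: besides applying the ReLU to the bottleneck activations in place, the kernel
# above writes a boolean mask (activation <= 0) that the backward graph uses to zero
# gradients through the ReLU; that mask is the buf7 tensor kept in call()'s return tuple.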
# kernel path: runs/run_shard_4/inductor_cache/az/cazousalzuqn73ciahz5izvogzu4ekcsktal4tthjvwjd3cqdayz.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/72/c72kehajbo6zfhkuwjl3g6t24haqfzxumia5abs5c2hzebjb6ubo.py
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x_4 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 8, 1, 1, 1), (8, 1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 8, 1, 1, 1), (8, 1, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (8, 1, 1, 1), (1, 8, 8, 8), 0); del buf2 # reuse
buf7 = empty_strided_cuda((8, 1, 1, 1), (1, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_3, buf7, 8, grid=grid(8), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 8, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 4, grid=grid(4), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 8, 1, 1, 1), (8, 1, 1, 1, 1), 0), buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 1, 1, 1), (8, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SEModule(nn.Module):
def __init__(self, channels, reduction=1 / 16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(channels, self.bottleneck, kernel_size=1,
padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(self.bottleneck, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
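# Usage sketch (illustrative): with channels=4 and the default reduction of 1/16,
# _round_width(4, 1/16) evaluates to max(8, int(0.25 + 4) // 8 * 8) = 8, so the
# squeeze-and-excitation bottleneck is widened to 8 channels; the module then gates
# the input with one sigmoid weight per leading (channel) dimension.
if __name__ == "__main__":
    se = SEModule(channels=4)
    gated = se(torch.rand(4, 4, 4, 4))
    print(se.bottleneck, gated.shape)  # 8 torch.Size([4, 4, 4, 4])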
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1, 1), (8, 1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1,
1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 8, 1, 1, 1), (8, 1, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (8, 1, 1, 1), (1, 8, 8, 8), 0)
del buf2
buf7 = empty_strided_cuda((8, 1, 1, 1), (1, 1, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8)](buf3, primals_3,
buf7, 8, XBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 8, 1,
1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(4)](buf5, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1,
8, 1, 1, 1), (8, 1, 1, 1, 1), 0), buf5, buf7
class SEModuleNew(nn.Module):
def __init__(self, channels, reduction=1 / 16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(channels, self.bottleneck, kernel_size=1,
padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(self.bottleneck, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION | SEModule | false | 5,939 | [
"MIT"
] | 1 | 6f4d1c7e6883d6b0664fcd04265f437247afab54 | https://github.com/VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION/tree/6f4d1c7e6883d6b0664fcd04265f437247afab54 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, channels, reduction=1 / 16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(channels, self.bottleneck, kernel_size=1,
padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(self.bottleneck, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PointWiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2v/c2vp4wevd4mmk6p3qeilou7zqojjaarvm3pedrgkmrhhjbggkpqu.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gh/cgh6bfy2bihruxswmk3lelq27ifjcz4y2cqp5igddznjqqf2443f.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s7/cs7zbumkjpjtp2asue2qrb7mzbik5vw3nnnxxz27vafugyjs7fr4.py
# Topologically Sorted Source Nodes: [outputs_2], Original ATen: [aten.add, aten.transpose]
# Source node to ATen node mapping:
# outputs_2 => add, permute_3
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %permute_3 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%permute_2, [1, 0]), kwargs = {})
triton_poi_fused_add_transpose_2 = async_compile.triton('triton_poi_fused_add_transpose_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_transpose_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_transpose_2(in_ptr0, in_ptr1, in_ptr2, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1 + (4*y0)), xmask & ymask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr1 + (y0 + (4*x1)), tmp4, xmask & ymask)
''', device_str='cuda')
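# Note: the kernel above folds the second convolution's bias add, the residual addition
# with the original input, and the final (-1, -2) transpose into a single elementwise
# pass, writing the result directly in the transposed layout expected by the caller.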
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 4, 4, grid=grid(4, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf6, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 4), (16, 4, 1))
buf5 = reinterpret_tensor(buf0, (4, 4), (1, 4), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [outputs_2], Original ATen: [aten.add, aten.transpose]
triton_poi_fused_add_transpose_2.run(buf3, primals_5, primals_1, buf5, 4, 4, grid=grid(4, 4), stream=stream0)
del buf3
del primals_5
return (buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf2, (1, 4, 4), (16, 4, 1), 0), buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class PointWiseFeedForward(torch.nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.conv1 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = torch.nn.Dropout(p=dropout_rate)
self.relu = torch.nn.ReLU()
self.conv2 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = torch.nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.
conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2)
outputs += inputs
return outputs
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'hidden_units': 4, 'dropout_rate': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_transpose_2(in_ptr0, in_ptr1, in_ptr2, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1 + 4 * y0), xmask & ymask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr1 + (y0 + 4 * x1), tmp4, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(4, 4)](primals_1, buf0, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf2,
primals_3, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4
), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf3, (1, 4, 4), (16, 4, 1))
buf5 = reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
del buf0
triton_poi_fused_add_transpose_2[grid(4, 4)](buf3, primals_5,
primals_1, buf5, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1,
num_stages=1)
del buf3
del primals_5
return buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4,
4), (4, 1, 4), 0), reinterpret_tensor(buf2, (1, 4, 4), (16, 4, 1), 0
), buf6
class PointWiseFeedForwardNew(torch.nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForwardNew, self).__init__()
self.conv1 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = torch.nn.Dropout(p=dropout_rate)
self.relu = torch.nn.ReLU()
self.conv2 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = torch.nn.Dropout(p=dropout_rate)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Vivdaddy/recsys-filterbubbles | PointWiseFeedForward | false | 5,940 | [
"MIT"
] | 1 | d21639bce515ffef5ba2db530dc2505eee1f83c0 | https://github.com/Vivdaddy/recsys-filterbubbles/tree/d21639bce515ffef5ba2db530dc2505eee1f83c0 | import torch
class Model(torch.nn.Module):
def __init__(self, hidden_units, dropout_rate):
super().__init__()
self.conv1 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = torch.nn.Dropout(p=dropout_rate)
self.relu = torch.nn.ReLU()
self.conv2 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = torch.nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.
conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2)
outputs += inputs
return outputs
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 0.5]
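# A minimal usage sketch (an addition for illustration, not part of the original
# repo): it assumes the Model class above and the 2-D [N, hidden_units] inputs
# used by get_inputs(); recent PyTorch versions accept such unbatched input to
# Conv1d. It only shows that the residual point-wise FFN preserves the shape.
def _pointwise_ffn_smoke_test():
    torch.manual_seed(0)
    ffn = Model(hidden_units=4, dropout_rate=0.5).eval()  # eval() turns dropout off
    x = torch.rand(4, 4)
    y = ffn(x)
    assert y.shape == x.shape  # conv1d with kernel_size=1 keeps [N, hidden_units]
    return y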
|
SigmaL1SmoothLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/oe/coe2tiy23oizimtsb53xrcsym37c537aviir7xix2u5zktc32ult.py
# Topologically Sorted Source Nodes: [sub, reg_diff, le, pow_1, mul, sub_1, reg_loss, mean], Original ATen: [aten.sub, aten.abs, aten.le, aten.pow, aten.mul, aten.where, aten.mean]
# Source node to ATen node mapping:
# le => le
# mean => mean
# mul => mul
# pow_1 => pow_1
# reg_diff => abs_1
# reg_loss => where
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%abs_1, 0.1111111111111111), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 4.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.05555555555555555), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {})
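# In plain math (a reading of the fragment above, added for clarity):
#   d = |arg0_1 - arg1_1|
#   loss = mean( 4.5 * d**2  if d <= 1/9  else  d - 1/18 )
# which is the smooth-L1 (Huber-style) loss with sigma = 3, since
# 1/9 = 1/sigma**2, 4.5 = sigma**2/2 and 1/18 = 1/(2*sigma**2).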
triton_per_fused_abs_le_mean_mul_pow_sub_where_0 = async_compile.triton('triton_per_fused_abs_le_mean_mul_pow_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_le_mean_mul_pow_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_le_mean_mul_pow_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 0.1111111111111111
tmp5 = tmp3 <= tmp4
tmp6 = tmp3 * tmp3
tmp7 = 4.5
tmp8 = tmp6 * tmp7
tmp9 = 0.05555555555555555
tmp10 = tmp3 - tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, reg_diff, le, pow_1, mul, sub_1, reg_loss, mean], Original ATen: [aten.sub, aten.abs, aten.le, aten.pow, aten.mul, aten.where, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_le_mean_mul_pow_sub_where_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SigmaL1SmoothLoss(nn.Module):
def forward(self, pred, targ):
reg_diff = torch.abs(targ - pred)
reg_loss = torch.where(torch.le(reg_diff, 1 / 9), 4.5 * torch.pow(
reg_diff, 2), reg_diff - 1 / 18)
return reg_loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
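# A hedged cross-check (added for illustration, not from the original repo): for
# beta = 1/9 the loss above should match PyTorch's built-in smooth_l1_loss,
# because 4.5*d**2 == 0.5*d**2/beta and 1/18 == 0.5*beta when beta == 1/9.
def _check_against_builtin_smooth_l1():
    import torch.nn.functional as F
    torch.manual_seed(0)
    pred = torch.rand(4, 4, 4, 4)
    targ = torch.rand(4, 4, 4, 4)
    ours = SigmaL1SmoothLoss()(pred, targ)
    ref = F.smooth_l1_loss(pred, targ, beta=1.0 / 9.0)
    assert torch.allclose(ours, ref, atol=1e-6)
    return ours, ref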
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_le_mean_mul_pow_sub_where_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 0.1111111111111111
tmp5 = tmp3 <= tmp4
tmp6 = tmp3 * tmp3
tmp7 = 4.5
tmp8 = tmp6 * tmp7
tmp9 = 0.05555555555555555
tmp10 = tmp3 - tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_le_mean_mul_pow_sub_where_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SigmaL1SmoothLossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| VrunArya/Hacktoberfest2021 | SigmaL1SmoothLoss | false | 5,941 | [
"MIT"
] | 1 | 5e739e52310dabf8b131abe5ecf906e13711b9d6 | https://github.com/VrunArya/Hacktoberfest2021/tree/5e739e52310dabf8b131abe5ecf906e13711b9d6 | import torch
from torch import nn
class Model(nn.Module):
def forward(self, pred, targ):
reg_diff = torch.abs(targ - pred)
reg_loss = torch.where(torch.le(reg_diff, 1 / 9), 4.5 * torch.pow(
reg_diff, 2), reg_diff - 1 / 18)
return reg_loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ChebConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ja/cjavyu5r4xu5nlitoyabvy7ofhz4ogm5mu3dk4l67xn2v3fgexma.py
# Topologically Sorted Source Nodes: [D], Original ATen: [aten.diag_embed]
# Source node to ATen node mapping:
# D => eq, full_default, iota, where
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute, %full_default), kwargs = {})
triton_poi_fused_diag_embed_0 = async_compile.triton('triton_poi_fused_diag_embed_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = -0.5
tmp11 = libdevice.pow(tmp9, tmp10)
tmp12 = 0.0
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7m/c7melmuezdwy5c6u3qg5b2dvtctgso6etynkxd74lrziaclsxuvt.py
# Topologically Sorted Source Nodes: [D, eye, L], Original ATen: [aten.diag_embed, aten.eye, aten.sub]
# Source node to ATen node mapping:
# D => full_default, iota
# L => sub
# eye => eq_1, full_default_1, where_1
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_1, %iota), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %full_default), kwargs = {})
# %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mm_1), kwargs = {})
triton_poi_fused_diag_embed_eye_sub_1 = async_compile.triton('triton_poi_fused_diag_embed_eye_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_eye_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_eye_sub_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp6 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nt/cnt63h74o735agddjfb5clildoy6gfngo35dt46zyissubqboxxq.py
# Topologically Sorted Source Nodes: [D, eye, multi_order_laplacian], Original ATen: [aten.diag_embed, aten.eye, aten.zeros]
# Source node to ATen node mapping:
# D => full_default, iota
# eye => eq_1, full_default_1, where_1
# multi_order_laplacian => full_default_3
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_1, %iota), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %full_default), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([5, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_3, %where_1, 0, 0), kwargs = {})
# %select_scatter_default_1 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %sub, 0, 1), kwargs = {})
triton_poi_fused_diag_embed_eye_zeros_2 = async_compile.triton('triton_poi_fused_diag_embed_eye_zeros_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_eye_zeros_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_eye_zeros_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x1
tmp7 = x0
tmp8 = tmp6 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp5, tmp11, tmp10)
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rs/crsoboivz65dylv2rwpdelqiarxf2u4i3uaaebwqwvnjhxkjbjjw.py
# Topologically Sorted Source Nodes: [mul, sub_1], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul => mul
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %select_8), kwargs = {})
# %select_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %sub_1, 0, 2), kwargs = {})
triton_poi_fused_mul_sub_3 = async_compile.triton('triton_poi_fused_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7w/c7wq3e2ygdgfl4kidxckp4el2aafp77m6tdwbcqmea2hupgvkoek.py
# Topologically Sorted Source Nodes: [mul_1, sub_2], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul_1 => mul_1
# sub_2 => sub_2
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_3, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %select_15), kwargs = {})
# %select_scatter_default_3 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %sub_2, 0, 3), kwargs = {})
triton_poi_fused_mul_sub_4 = async_compile.triton('triton_poi_fused_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vx/cvxptdlc4xjc2zntndyxuyooh2cl4jzpbya2v26rjm7vmkjhjszi.py
# Topologically Sorted Source Nodes: [mul_2, sub_3], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul_2 => mul_2
# sub_3 => sub_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %select_22), kwargs = {})
# %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %sub_3, 0, 4), kwargs = {})
triton_poi_fused_mul_sub_5 = async_compile.triton('triton_poi_fused_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wi/cwija4xxrmds4pzlo5szwl2su3gon4ktmt34eluogiaiavrknbqr.py
# Topologically Sorted Source Nodes: [sum_2, result_2], Original ATen: [aten.sum, aten.add]
# Source node to ATen node mapping:
# result_2 => add
# sum_2 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_6, [0]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %primals_4), kwargs = {})
triton_poi_fused_add_sum_6 = async_compile.triton('triton_poi_fused_add_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sum_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp9 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (5, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [D], Original ATen: [aten.diag_embed]
stream0 = get_raw_stream(0)
triton_poi_fused_diag_embed_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_1, out=buf1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm]
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [D, eye, L], Original ATen: [aten.diag_embed, aten.eye, aten.sub]
triton_poi_fused_diag_embed_eye_sub_1.run(buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [D, eye, multi_order_laplacian], Original ATen: [aten.diag_embed, aten.eye, aten.zeros]
triton_poi_fused_diag_embed_eye_zeros_2.run(buf3, buf4, 80, grid=grid(80), stream=stream0)
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(buf4, (4, 4), (4, 1), 16), out=buf5)
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub_1], Original ATen: [aten.mul, aten.sub]
triton_poi_fused_mul_sub_3.run(buf5, buf4, buf6, 80, grid=grid(80), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 32), out=buf7)
buf8 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul_1, sub_2], Original ATen: [aten.mul, aten.sub]
triton_poi_fused_mul_sub_4.run(buf7, buf6, buf8, 80, grid=grid(80), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [mm_4], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(buf8, (4, 4), (4, 1), 48), out=buf9)
del buf3
buf10 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [mul_2, sub_3], Original ATen: [aten.mul, aten.sub]
triton_poi_fused_mul_sub_5.run(buf9, buf8, buf10, 80, grid=grid(80), stream=stream0)
buf11 = reinterpret_tensor(buf8, (20, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (20, 4), (4, 1), 0), primals_2, out=buf11)
del primals_2
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [result_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (5, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (5, 4, 4), (16, 4, 1), 0), out=buf12)
del primals_3
buf13 = reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [sum_2, result_2], Original ATen: [aten.sum, aten.add]
triton_poi_fused_add_sum_6.run(buf12, primals_4, buf13, 16, grid=grid(16), stream=stream0)
del buf12
del primals_4
return (buf13, reinterpret_tensor(buf11, (5, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((5, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init as init
class ChebConv(nn.Module):
"""
The ChebNet convolution operation.
:param in_c: int, number of input dim
:param out_c: int, number of output dim
    :param K: int, the order of the Chebyshev polynomial (i.e. how many orders of the Chebyshev expansion to use)
"""
def __init__(self, in_c, out_c, K, bias=True, normalize=True):
super(ChebConv, self).__init__()
self.normalize = normalize
self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
init.xavier_normal_(self.weight)
if bias:
self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
init.zeros_(self.bias)
else:
self.register_parameter('bias', None)
self.K = K + 1
def forward(self, inputs, graph):
"""
:param inputs: the input data, [B, N, C]
:param graph: the graph structure, [N, N]
:return: convolution result, [B, N, D]
"""
L = ChebConv.get_laplacian(graph, self.normalize)
mul_L = self.cheb_polynomial(L).unsqueeze(1)
result = torch.matmul(mul_L, inputs)
result = torch.matmul(result, self.weight)
result = torch.sum(result, dim=0) + self.bias
return result
def cheb_polynomial(self, laplacian):
"""
        Compute the Chebyshev polynomials of the graph Laplacian.
        :param laplacian: the graph Laplacian, [N, N].
        :return: the multi-order Chebyshev Laplacian, [K, N, N].
"""
N = laplacian.size(0)
multi_order_laplacian = torch.zeros([self.K, N, N], device=
laplacian.device, dtype=torch.float)
multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
dtype=torch.float)
if self.K == 1:
return multi_order_laplacian
else:
multi_order_laplacian[1] = laplacian
if self.K == 2:
return multi_order_laplacian
else:
for k in range(2, self.K):
multi_order_laplacian[k] = 2 * torch.mm(laplacian,
multi_order_laplacian[k - 1]) - multi_order_laplacian[
k - 2]
return multi_order_laplacian
@staticmethod
def get_laplacian(graph, normalize):
"""
        Return the Laplacian of the graph.
        :param graph: the graph structure without self-loops, [N, N].
        :param normalize: whether to use the normalized Laplacian.
        :return: the graph Laplacian.
"""
if normalize:
D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
) - torch.mm(torch.mm(D, graph), D)
else:
D = torch.diag(torch.sum(graph, dim=-1))
L = D - graph
return L
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_c': 4, 'out_c': 4, 'K': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = -0.5
tmp11 = libdevice.pow(tmp9, tmp10)
tmp12 = 0.0
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_diag_embed_eye_sub_1(in_out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp6 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_diag_embed_eye_zeros_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x1
tmp7 = x0
tmp8 = tmp6 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp5, tmp11, tmp10)
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_mul_sub_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_sum_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (5, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_diag_embed_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, primals_1, out=buf1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = buf2
del buf2
triton_poi_fused_diag_embed_eye_sub_1[grid(16)](buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_diag_embed_eye_zeros_2[grid(80)](buf3, buf4, 80,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = buf1
del buf1
extern_kernels.mm(buf3, reinterpret_tensor(buf4, (4, 4), (4, 1), 16
), out=buf5)
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sub_3[grid(80)](buf5, buf4, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.mm(buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 32
), out=buf7)
buf8 = buf4
del buf4
triton_poi_fused_mul_sub_4[grid(80)](buf7, buf6, buf8, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf9 = buf7
del buf7
extern_kernels.mm(buf3, reinterpret_tensor(buf8, (4, 4), (4, 1), 48
), out=buf9)
del buf3
buf10 = buf6
del buf6
triton_poi_fused_mul_sub_5[grid(80)](buf9, buf8, buf10, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf8, (20, 4), (4, 1), 0)
del buf8
extern_kernels.mm(reinterpret_tensor(buf10, (20, 4), (4, 1), 0),
primals_2, out=buf11)
del primals_2
buf12 = buf10
del buf10
extern_kernels.bmm(reinterpret_tensor(buf11, (5, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_3, (5, 4, 4), (16, 4, 1), 0),
out=buf12)
del primals_3
buf13 = reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_sum_6[grid(16)](buf12, primals_4, buf13, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf12
del primals_4
return buf13, reinterpret_tensor(buf11, (5, 4, 4), (16, 1, 4), 0)
class ChebConvNew(nn.Module):
"""
The ChebNet convolution operation.
:param in_c: int, number of input dim
:param out_c: int, number of output dim
    :param K: int, the order of the Chebyshev polynomial (i.e. how many orders of the Chebyshev expansion to use)
"""
def __init__(self, in_c, out_c, K, bias=True, normalize=True):
super(ChebConvNew, self).__init__()
self.normalize = normalize
self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
init.xavier_normal_(self.weight)
if bias:
self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
init.zeros_(self.bias)
else:
self.register_parameter('bias', None)
self.K = K + 1
def cheb_polynomial(self, laplacian):
"""
        Compute the Chebyshev polynomials of the graph Laplacian.
        :param laplacian: the graph Laplacian, [N, N].
        :return: the multi-order Chebyshev Laplacian, [K, N, N].
"""
N = laplacian.size(0)
multi_order_laplacian = torch.zeros([self.K, N, N], device=
laplacian.device, dtype=torch.float)
multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
dtype=torch.float)
if self.K == 1:
return multi_order_laplacian
else:
multi_order_laplacian[1] = laplacian
if self.K == 2:
return multi_order_laplacian
else:
for k in range(2, self.K):
multi_order_laplacian[k] = 2 * torch.mm(laplacian,
multi_order_laplacian[k - 1]) - multi_order_laplacian[
k - 2]
return multi_order_laplacian
@staticmethod
def get_laplacian(graph, normalize):
"""
        Return the Laplacian of the graph.
        :param graph: the graph structure without self-loops, [N, N].
        :param normalize: whether to use the normalized Laplacian.
        :return: the graph Laplacian.
"""
if normalize:
D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
) - torch.mm(torch.mm(D, graph), D)
else:
D = torch.diag(torch.sum(graph, dim=-1))
L = D - graph
return L
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| V-cyberpunk-01/GNN | ChebConv | false | 5,942 | [
"MIT"
] | 1 | 25a6b24f4d8fad626af33f98e189b221c50406cd | https://github.com/V-cyberpunk-01/GNN/tree/25a6b24f4d8fad626af33f98e189b221c50406cd | import torch
import torch.nn as nn
import torch.nn.init as init
class Model(nn.Module):
"""
The ChebNet convolution operation.
:param in_c: int, number of input dim
:param out_c: int, number of output dim
:param K: int, the order of Chebyshev Polynomial,切比雪夫展开多少阶
"""
def __init__(self, in_c, out_c, K, bias=True, normalize=True):
super().__init__()
self.normalize = normalize
self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
init.xavier_normal_(self.weight)
if bias:
self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
init.zeros_(self.bias)
else:
self.register_parameter('bias', None)
self.K = K + 1
def forward(self, inputs, graph):
"""
:param inputs: the input data, [B, N, C]
:param graph: the graph structure, [N, N]
:return: convolution result, [B, N, D]
"""
        L = self.get_laplacian(graph, self.normalize)
mul_L = self.cheb_polynomial(L).unsqueeze(1)
result = torch.matmul(mul_L, inputs)
result = torch.matmul(result, self.weight)
result = torch.sum(result, dim=0) + self.bias
return result
def cheb_polynomial(self, laplacian):
"""
        Compute the Chebyshev polynomials of the graph Laplacian.
        :param laplacian: the graph Laplacian, [N, N].
        :return: the multi-order Chebyshev Laplacian, [K, N, N].
"""
N = laplacian.size(0)
multi_order_laplacian = torch.zeros([self.K, N, N], device=
laplacian.device, dtype=torch.float)
multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
dtype=torch.float)
if self.K == 1:
return multi_order_laplacian
else:
multi_order_laplacian[1] = laplacian
if self.K == 2:
return multi_order_laplacian
else:
for k in range(2, self.K):
multi_order_laplacian[k] = 2 * torch.mm(laplacian,
multi_order_laplacian[k - 1]) - multi_order_laplacian[
k - 2]
return multi_order_laplacian
@staticmethod
def get_laplacian(graph, normalize):
"""
        Return the Laplacian of the graph.
        :param graph: the graph structure without self-loops, [N, N].
        :param normalize: whether to use the normalized Laplacian.
        :return: the graph Laplacian.
"""
if normalize:
D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
) - torch.mm(torch.mm(D, graph), D)
else:
D = torch.diag(torch.sum(graph, dim=-1))
L = D - graph
return L
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
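# --- Illustrative sketch (not part of the original module) ---
# A minimal sanity check of the two helpers above, assuming the Model class
# defined in this listing: get_laplacian builds L = I - D^{-1/2} A D^{-1/2}
# and cheb_polynomial applies the recurrence T_k(L) = 2 L T_{k-1}(L) - T_{k-2}(L).
if __name__ == '__main__':
    adj = torch.rand(4, 4)
    adj = (adj + adj.t()) / 2          # symmetric edge weights
    adj.fill_diagonal_(0.0)            # no self loops, as the docstring assumes
    model = Model(in_c=4, out_c=4, K=4)
    L = Model.get_laplacian(adj, normalize=True)
    cheb = model.cheb_polynomial(L)    # [K + 1, N, N]
    # T_2(L) must equal 2 * L @ T_1(L) - T_0(L)
    assert torch.allclose(cheb[2], 2 * L @ cheb[1] - cheb[0], atol=1e-5)
    out = model(torch.rand(2, 4, 4), adj)
    print(out.shape)                   # expected: torch.Size([2, 4, 4])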
|
Loss_fn | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ev/cevrgxdxpmu3sxdi2mxonyf62b6i4l37kax3snq4l7gm2ajbiknc.py
# Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# add => add
# diff => sub
# loss => mean
# mul => mul
# sqrt => sqrt
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mean_mul_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_sqrt_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Loss_fn(nn.Module):
def __init__(self, eps=0.001):
super().__init__()
self.eps = eps
def forward(self, ip, target):
diff = ip - target
loss = torch.mean(torch.sqrt(diff * diff + self.eps * self.eps))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
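# Descriptive note on the kernel above: a single program reduces all 256
# elements at once. It computes sqrt((in_ptr0 - in_ptr1)^2 + 1e-06) elementwise
# (1e-06 is eps**2 for the module's default eps=0.001), sums the block and
# divides by 256.0, writing the scalar mean loss to in_out_ptr0.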
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_sqrt_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class Loss_fnNew(nn.Module):
def __init__(self, eps=0.001):
super().__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Vrushank264/Low-Light-Enhancement | Loss_fn | false | 5,943 | [
"MIT"
] | 1 | 3c13a10a16eab8183b8fbd0c063d9815b662259a | https://github.com/Vrushank264/Low-Light-Enhancement/tree/3c13a10a16eab8183b8fbd0c063d9815b662259a | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, eps=0.001):
super().__init__()
self.eps = eps
def forward(self, ip, target):
diff = ip - target
loss = torch.mean(torch.sqrt(diff * diff + self.eps * self.eps))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
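# --- Illustrative sketch (not part of the original module) ---
# The loss above is the Charbonnier penalty mean(sqrt(diff^2 + eps^2)):
# close to |diff| for large residuals, smooth (about eps) near zero.
if __name__ == '__main__':
    crit = Model(eps=0.001)
    pred = torch.zeros(3)
    target = torch.tensor([0.0, 0.1, 1.0])
    print(crit(pred, target))   # roughly mean([0.001, 0.1, 1.0]) ~= 0.367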
|
TemporallyBatchedAdditiveAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ms/cmsc2ldwncb44bwa7sbkqfqtyad2dyjcej7cywqajlpz3qs2bpqf.py
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# tanh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x0 = xindex % 64
x2 = (xindex // 256)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x4), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_probs => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_probs => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gt/cgt6ickymkk3j6ootc4hsb2blqews2n64wmaey7kzqk6e3v2rb4u.py
# Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# final_context_vec => sum_2
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf0, buf1, buf2, 1024, grid=grid(1024), stream=stream0)
buf3 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf5, primals_2, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 4, 16, 1, 1), 0), primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf2, buf5, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AdditiveAttention(nn.Module):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super(AdditiveAttention, self).__init__()
if internal_dim is None:
internal_dim = int((encoder_hidden_state_dim +
decoder_hidden_state_dim) / 2)
self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
self.v = nn.Linear(internal_dim, 1, bias=False)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + self.w2(
decoder_state)))
def forward(self, encoder_states, decoder_state):
score_vec = torch.cat([self.score(encoder_states[:, i],
decoder_state) for i in range(encoder_states.shape[1])], dim=1)
attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, attention_probs
class TemporallyBatchedAdditiveAttention(AdditiveAttention):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super(TemporallyBatchedAdditiveAttention, self).__init__(
encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze(
self.w2(decoder_state), dim=1)))
def forward(self, encoder_states, decoder_state):
score_vec = self.score(encoder_states, decoder_state)
attention_probs = F.softmax(score_vec, dim=1)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, torch.squeeze(torch.transpose(
attention_probs, 1, 2), dim=3)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_hidden_state_dim': 4, 'decoder_hidden_state_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x0 = xindex % 64
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + x4, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
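# Descriptive note: the two kernels above together form a numerically stable
# softmax over dim=1 of the (4, 4, 4, 4, 1) score tensor. The first subtracts
# the maximum of the four dim-1 slices at each position before exponentiating;
# the second divides each exponential by the sum of those four slices.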
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(1024)](buf0, buf1, buf2, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.mm(reinterpret_tensor(buf2, (256, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf3
triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_mul_sum_3[grid(256)](buf5, primals_2, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 4, 16, 1, 1), 0
), primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf2, buf5, primals_5
class AdditiveAttention(nn.Module):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super(AdditiveAttention, self).__init__()
if internal_dim is None:
internal_dim = int((encoder_hidden_state_dim +
decoder_hidden_state_dim) / 2)
self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
self.v = nn.Linear(internal_dim, 1, bias=False)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + self.w2(
decoder_state)))
def forward(self, encoder_states, decoder_state):
score_vec = torch.cat([self.score(encoder_states[:, i],
decoder_state) for i in range(encoder_states.shape[1])], dim=1)
attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, attention_probs
class TemporallyBatchedAdditiveAttentionNew(AdditiveAttention):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super(TemporallyBatchedAdditiveAttentionNew, self).__init__(
encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze(
self.w2(decoder_state), dim=1)))
def forward(self, input_0, input_1):
primals_1 = self.w1.weight
primals_3 = self.w2.weight
primals_5 = self.v.weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| Vision-CAIR/UnlikelihoodMotionForecasting | TemporallyBatchedAdditiveAttention | false | 5,944 | [
"MIT"
] | 1 | 556d6a3ed3e4e0e2d88108d7dbb48933313b58aa | https://github.com/Vision-CAIR/UnlikelihoodMotionForecasting/tree/556d6a3ed3e4e0e2d88108d7dbb48933313b58aa | import torch
import torch.nn as nn
import torch.nn.functional as F
class AdditiveAttention(nn.Module):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super().__init__()
if internal_dim is None:
internal_dim = int((encoder_hidden_state_dim +
decoder_hidden_state_dim) / 2)
self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
self.v = nn.Linear(internal_dim, 1, bias=False)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + self.w2(
decoder_state)))
def forward(self, encoder_states, decoder_state):
score_vec = torch.cat([self.score(encoder_states[:, i],
decoder_state) for i in range(encoder_states.shape[1])], dim=1)
attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, attention_probs
class Model(AdditiveAttention):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super().__init__(encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze(
self.w2(decoder_state), dim=1)))
def forward(self, encoder_states, decoder_state):
score_vec = self.score(encoder_states, decoder_state)
attention_probs = F.softmax(score_vec, dim=1)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, torch.squeeze(torch.transpose(
attention_probs, 1, 2), dim=3)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
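# --- Illustrative sketch (not part of the original module) ---
# A minimal check of the temporally batched additive attention above, using
# the same toy shapes as get_inputs(): scores are softmaxed over dim=1, so the
# attention weights sum to 1 along that axis before they gate the encoder states.
if __name__ == '__main__':
    attn = Model(encoder_hidden_state_dim=4, decoder_hidden_state_dim=4)
    enc, dec = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    context, probs = attn(enc, dec)
    print(context.shape)    # torch.Size([4, 4, 4, 4])
    weights = F.softmax(attn.score(enc, dec), dim=1).sum(dim=1)
    assert torch.allclose(weights, torch.ones_like(weights), atol=1e-5)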
|
FocalLoss2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bs/cbsubpcqxhddkuk3dqnowueqr2onjixodp75rq36ds4who3454xo.py
# Topologically Sorted Source Nodes: [eq, pos_mask, prob, sub, pow_1, mul, log, mul_1, pos_loss, eq_1, neg_mask, pow_2, mul_3, sub_1, log_1, mul_4, neg_loss, loss, mean], Original ATen: [aten.eq, aten._to_copy, aten.clamp, aten.rsub, aten.pow, aten.mul, aten.log, aten.add, aten.mean]
# Source node to ATen node mapping:
# eq => eq
# eq_1 => eq_1
# log => log
# log_1 => log_1
# loss => add
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_3 => mul_3
# mul_4 => mul_4
# neg_loss => mul_5
# neg_mask => convert_element_type_1
# pos_loss => mul_2
# pos_mask => convert_element_type
# pow_1 => pow_1
# pow_2 => pow_2
# prob => clamp_max, clamp_min
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-06), kwargs = {})
# %clamp_max : [num_users=4] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.999999), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %pow_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, -0.25), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 0), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq_1, torch.float32), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_max, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %pow_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_1,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %log_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, -0.75), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0 = async_compile.triton('triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = 1e-06
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.999999
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp3 * tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp11 * tmp12
tmp14 = -0.25
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = tmp0 == tmp16
tmp18 = tmp17.to(tl.float32)
tmp19 = tmp8 * tmp8
tmp20 = tmp18 * tmp19
tmp21 = tl_math.log(tmp9)
tmp22 = tmp20 * tmp21
tmp23 = -0.75
tmp24 = tmp22 * tmp23
tmp25 = tmp15 + tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = 256.0
tmp30 = tmp28 / tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [eq, pos_mask, prob, sub, pow_1, mul, log, mul_1, pos_loss, eq_1, neg_mask, pow_2, mul_3, sub_1, log_1, mul_4, neg_loss, loss, mean], Original ATen: [aten.eq, aten._to_copy, aten.clamp, aten.rsub, aten.pow, aten.mul, aten.log, aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FocalLoss2d(nn.Module):
def __init__(self, alpha=0.25, gamma=2, ignore_index=None, reduction=
'mean', **kwargs):
super(FocalLoss2d, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.smooth = 1e-06
self.ignore_index = ignore_index
self.reduction = reduction
assert self.reduction in ['none', 'mean', 'sum']
def forward(self, prob, target):
prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)
valid_mask = None
if self.ignore_index is not None:
valid_mask = (target != self.ignore_index).float()
pos_mask = (target == 1).float()
neg_mask = (target == 0).float()
if valid_mask is not None:
pos_mask = pos_mask * valid_mask
neg_mask = neg_mask * valid_mask
pos_weight = (pos_mask * torch.pow(1 - prob, self.gamma)).detach()
pos_loss = -self.alpha * (pos_weight * torch.log(prob))
neg_weight = (neg_mask * torch.pow(prob, self.gamma)).detach()
neg_loss = -(1 - self.alpha) * (neg_weight * torch.log(1 - prob))
loss = pos_loss + neg_loss
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = 1e-06
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.999999
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp3 * tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp11 * tmp12
tmp14 = -0.25
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = tmp0 == tmp16
tmp18 = tmp17.to(tl.float32)
tmp19 = tmp8 * tmp8
tmp20 = tmp18 * tmp19
tmp21 = tl_math.log(tmp9)
tmp22 = tmp20 * tmp21
tmp23 = -0.75
tmp24 = tmp22 * tmp23
tmp25 = tmp15 + tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = 256.0
tmp30 = tmp28 / tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
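# Descriptive note on the kernel above: it fuses the whole focal loss for the
# defaults alpha=0.25, gamma=2. Probabilities are clamped to [1e-06, 0.999999],
# the positive term is -0.25 * (target == 1) * (1 - p)^2 * log(p), the negative
# term is -0.75 * (target == 0) * p^2 * log(1 - p), and the 256 per-element
# values are summed and divided by 256.0 into a single scalar.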
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_log_mean_mul_pow_rsub_0[grid(1)
](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLoss2dNew(nn.Module):
def __init__(self, alpha=0.25, gamma=2, ignore_index=None, reduction=
'mean', **kwargs):
super(FocalLoss2dNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.smooth = 1e-06
self.ignore_index = ignore_index
self.reduction = reduction
assert self.reduction in ['none', 'mean', 'sum']
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WHU-YH-jx/bionetwork_segmentation | FocalLoss2d | false | 5,945 | [
"MIT"
] | 1 | 556c5b61a1a3784875b31eacb8c6bb418d70ee9a | https://github.com/WHU-YH-jx/bionetwork_segmentation/tree/556c5b61a1a3784875b31eacb8c6bb418d70ee9a | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, alpha=0.25, gamma=2, ignore_index=None, reduction=
'mean', **kwargs):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.smooth = 1e-06
self.ignore_index = ignore_index
self.reduction = reduction
assert self.reduction in ['none', 'mean', 'sum']
def forward(self, prob, target):
prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)
valid_mask = None
if self.ignore_index is not None:
valid_mask = (target != self.ignore_index).float()
pos_mask = (target == 1).float()
neg_mask = (target == 0).float()
if valid_mask is not None:
pos_mask = pos_mask * valid_mask
neg_mask = neg_mask * valid_mask
pos_weight = (pos_mask * torch.pow(1 - prob, self.gamma)).detach()
pos_loss = -self.alpha * (pos_weight * torch.log(prob))
neg_weight = (neg_mask * torch.pow(prob, self.gamma)).detach()
neg_loss = -(1 - self.alpha) * (neg_weight * torch.log(1 - prob))
loss = pos_loss + neg_loss
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
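# --- Illustrative sketch (not part of the original module) ---
# Per element the loss above is the binary focal loss:
#   target == 1: -alpha * (1 - p)^gamma * log(p)
#   target == 0: -(1 - alpha) * p^gamma * log(1 - p)
# so well-classified examples are strongly down-weighted. Tiny check with the
# defaults (alpha=0.25, gamma=2):
if __name__ == '__main__':
    crit = Model()
    prob = torch.tensor([[0.9]])
    target = torch.tensor([[1.0]])
    p = prob.squeeze()
    expected = -0.25 * (1 - p) ** 2 * torch.log(p)
    assert torch.allclose(crit(prob, target), expected, atol=1e-6)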
|
SpatialAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/uc/cucdaa5tqnxykdmw5yqh7ir5ac35phopjcobljrg4rrtlnfjtuwd.py
# Topologically Sorted Source Nodes: [compress_x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# compress_x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
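# Descriptive note: the kernel above materializes the 2-channel spatial
# descriptor fed to the attention conv. Output channel 0 (x1 == 0) is the
# maximum over the 4 input channels at each spatial position, channel 1
# (x1 == 1) is their mean, i.e. torch.cat([max over dim=1, mean over dim=1], dim=1).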
# kernel path: runs/run_shard_4/inductor_cache/hy/chyq5lqryq6qebxlfhdffupuca4px672fxychf3hng5vgomdaota.py
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k4/ck42otglczd2qtk6fovmlu2yv7bzgywgiadyjygqbvu4m2rftbjm.py
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# scale => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 5, 5), (50, 25, 5, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [compress_x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_2.run(primals_1, buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 5, 5), (50, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
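# Illustrative driver (added for clarity, not emitted by Inductor): besides the benchmark
# harness above, call() can be fed a fresh primals list of (input, conv weight, conv bias)
# directly; the first returned tensor is the attended feature map, the rest are kept for
# the backward pass. The helper name is made up for this sketch and a CUDA device is
# assumed, since the kernels were compiled for 'cuda'.
def run_compiled_spatial_attention_once():
    x = torch.rand((4, 4, 4, 4), device='cuda:0', dtype=torch.float32)
    weight = torch.rand((1, 2, 5, 5), device='cuda:0', dtype=torch.float32)
    bias = torch.rand((1,), device='cuda:0', dtype=torch.float32)
    out, *_ = call([x, weight, bias])
    return out  # (4, 4, 4, 4): x * sigmoid(conv(cat(channel-max, channel-mean)))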
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CompressChannels(nn.Module):
"""
Compresses the input channels to 2 by concatenating the results of
Global Average Pooling(GAP) and Global Max Pooling(GMP).
HxWxC => HxWx2
"""
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1)
.unsqueeze(1)), dim=1)
class SpatialAttention(nn.Module):
"""
Spatial Attention:
HxWxC
|
---------
| |
GAP GMP
| |
----C---
|
HxWx2
|
Conv
|
Sigmoid
|
HxWx1
Multiplying HxWx1 with input again gives output -> HxWxC
"""
def __init__(self):
super().__init__()
self.compress_channels = CompressChannels()
self.conv = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=5,
stride=1, padding=2)
def forward(self, x):
compress_x = self.compress_channels(x)
x_out = self.conv(compress_x)
scale = torch.sigmoid(x_out)
return x * scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
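# Illustrative shape check (added here, not part of the original repo): CompressChannels
# collapses the channel axis to two maps (per-pixel max and per-pixel mean), which is
# exactly the in_channels=2 expected by the 5x5 convolution in SpatialAttention, and the
# final sigmoid map only rescales the input, so the output keeps the input shape.
def _spatial_attention_shape_example():
    fm = torch.rand(4, 8, 16, 16)
    assert CompressChannels()(fm).shape == (4, 2, 16, 16)
    assert SpatialAttention()(fm).shape == fm.shape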
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 5, 5), (50, 25, 5, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf2, buf3,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, buf0, buf2
class CompressChannels(nn.Module):
"""
Compresses the input channels to 2 by concatenating the results of
Global Average Pooling(GAP) and Global Max Pooling(GMP).
HxWxC => HxWx2
"""
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1)
.unsqueeze(1)), dim=1)
class SpatialAttentionNew(nn.Module):
"""
Spatial Attention:
HxWxC
|
---------
| |
GAP GMP
| |
----C---
|
HxWx2
|
Conv
|
Sigmoid
|
HxWx1
Multiplying HxWx1 with input again gives output -> HxWxC
"""
def __init__(self):
super().__init__()
self.compress_channels = CompressChannels()
self.conv = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=5,
stride=1, padding=2)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
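# Illustrative equivalence check (added here, not generated by Inductor): the fused
# Triton path should match the eager computation described in the docstring above, up
# to floating-point tolerance. The helper name is made up for this sketch and a CUDA
# device is assumed, because call() launches kernels on 'cuda'.
def _check_fused_spatial_attention():
    fused = SpatialAttentionNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    compressed = torch.cat((x.max(1, keepdim=True)[0], x.mean(1, keepdim=True)), dim=1)
    reference = x * torch.sigmoid(fused.conv(compressed))
    assert torch.allclose(fused(x), reference, rtol=1e-4, atol=1e-5)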
| Vrushank264/Low-Light-Enhancement | SpatialAttention | false | 5,946 | [
"MIT"
] | 1 | 3c13a10a16eab8183b8fbd0c063d9815b662259a | https://github.com/Vrushank264/Low-Light-Enhancement/tree/3c13a10a16eab8183b8fbd0c063d9815b662259a | import torch
import torch.nn as nn
class CompressChannels(nn.Module):
"""
Compresses the input channels to 2 by concatenating the results of
Global Average Pooling(GAP) and Global Max Pooling(GMP).
HxWxC => HxWx2
"""
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1)
.unsqueeze(1)), dim=1)
class Model(nn.Module):
"""
Spatial Attention:
HxWxC
|
---------
| |
GAP GMP
| |
----C---
|
HxWx2
|
Conv
|
Sigmoid
|
HxWx1
Multiplying HxWx1 with input again gives output -> HxWxC
"""
def __init__(self):
super().__init__()
self.compress_channels = CompressChannels()
self.conv = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=5,
stride=1, padding=2)
def forward(self, x):
compress_x = self.compress_channels(x)
x_out = self.conv(compress_x)
scale = torch.sigmoid(x_out)
return x * scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/s7/cs7jv7spsytbq3ouvdhla2tcr7wzgoznysid6m7rapuqn7g7cc3h.py
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ng/cngbi3yr4c2hnkkdy3i7xpiprlmcbqpynwcgxiwltr3g47fmzttz.py
# Topologically Sorted Source Nodes: [mul_1, add, add_1, add_2, dice, sum_4, truediv_1, dice_1], Original ATen: [aten.mul, aten.add, aten.div, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dice => div
# dice_1 => sub
# mul_1 => mul_1
# sum_4 => sum_4
# truediv_1 => div_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = 1.0
tmp16 = tmp15 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [mul_1, add, add_1, add_2, dice, sum_4, truediv_1, dice_1], Original ATen: [aten.mul, aten.add, aten.div, aten.sum, aten.rsub]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, target):
smooth = 1e-05
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = input * target
dice = (2.0 * intersection.sum(1) + smooth) / (input.sum(1) +
target.sum(1) + smooth)
dice = 1 - dice.sum() / num
return dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
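# Worked sanity check (added for illustration, not part of the original repo): for a
# perfect prediction every per-sample dice term is (2*s + eps) / (2*s + eps) = 1, so the
# loss is ~0; for a completely wrong prediction the numerator collapses to eps and the
# loss approaches 1.
def _dice_loss_sanity_example():
    criterion = DiceLoss()
    target = torch.ones(2, 1, 4, 4)
    assert criterion(target, target).item() < 1e-4
    assert criterion(torch.zeros_like(target), target).item() > 0.99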
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = 1.0
tmp16 = tmp15 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
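# Illustrative cross-check (added here, not generated by Inductor): the two fused kernels
# should reproduce the eager dice formula. A CUDA device is assumed and the helper name
# is made up for this sketch.
def _check_fused_dice_loss():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    p, t = pred.view(4, -1), target.view(4, -1)
    dice = (2.0 * (p * t).sum(1) + 1e-05) / (p.sum(1) + t.sum(1) + 1e-05)
    eager = 1 - dice.sum() / 4
    assert torch.allclose(DiceLossNew()(pred, target), eager, rtol=1e-4, atol=1e-5)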
| WHU-YH-jx/bionetwork_segmentation | DiceLoss | false | 5,947 | [
"MIT"
] | 1 | 556c5b61a1a3784875b31eacb8c6bb418d70ee9a | https://github.com/WHU-YH-jx/bionetwork_segmentation/tree/556c5b61a1a3784875b31eacb8c6bb418d70ee9a | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, target):
smooth = 1e-05
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = input * target
dice = (2.0 * intersection.sum(1) + smooth) / (input.sum(1) +
target.sum(1) + smooth)
dice = 1 - dice.sum() / num
return dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DiffLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/a2/ca2bypdozo4vzjrvjjoirhpfgvzfuin2hjpbbvp4ycptip36csey.py
# Topologically Sorted Source Nodes: [input1_mean, input1_1, norm, add, input1_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# input1_1 => sub
# input1_l2 => div
# input1_mean => mean
# norm => pow_1, sum_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = tmp16 + tmp17
tmp19 = tmp10 / tmp18
tl.store(out_ptr2 + (r1 + (64*x0)), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/e2/ce27barrmkjhojql257rnzuamyefwb737kzvhkz47ba5fumvkj2b.py
# Topologically Sorted Source Nodes: [pow_1, diff_loss], Original ATen: [aten.pow, aten.mean]
# Source node to ATen node mapping:
# diff_loss => mean_2
# pow_1 => pow_5
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mm, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {})
triton_red_fused_mean_pow_1 = async_compile.triton('triton_red_fused_mean_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input1_mean, input1_1, norm, add, input1_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0.run(arg0_1, buf4, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input2_mean, input2_1, norm_1, add_1, input2_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div]
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0.run(arg1_1, buf5, 4, 64, grid=grid(4), stream=stream0)
del arg1_1
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, input2_l2, mm], Original ATen: [aten.add, aten.div, aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0), buf5, out=buf6)
del buf4
del buf5
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [pow_1, diff_loss], Original ATen: [aten.pow, aten.mean]
triton_red_fused_mean_pow_1.run(buf8, buf6, 1, 4096, grid=grid(1), stream=stream0)
del buf6
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class DiffLoss(nn.Module):
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, input1, input2):
batch_size = input1.size(0)
input1 = input1.view(batch_size, -1)
input2 = input2.view(batch_size, -1)
input1_mean = torch.mean(input1, dim=0, keepdims=True)
input2_mean = torch.mean(input2, dim=0, keepdims=True)
input1 = input1 - input1_mean
input2 = input2 - input2_mean
input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach()
input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-06)
input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach()
input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-06)
diff_loss = torch.mean(input1_l2.t().mm(input2_l2).pow(2))
return diff_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
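# Illustrative example (added here, not part of the original repo): the loss is the mean
# squared entry of the cross-correlation between the centred, row-normalised feature
# matrices, so two feature sets whose per-sample coefficients are orthogonal across the
# batch give a loss of ~0.
def _diff_loss_orthogonal_example():
    s1 = torch.tensor([1.0, -1.0, 1.0, -1.0]).view(4, 1)
    s2 = torch.tensor([1.0, 1.0, -1.0, -1.0]).view(4, 1)
    input1 = s1 * torch.randn(1, 16)  # first feature set, modulated by s1 per sample
    input2 = s2 * torch.randn(1, 16)  # second feature set, modulated by s2 per sample
    assert DiffLoss()(input1, input2).item() < 1e-06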
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = tmp16 + tmp17
tmp19 = tmp10 / tmp18
tl.store(out_ptr2 + (r1 + 64 * x0), tmp19, xmask)
@triton.jit
def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg0_1,
buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg1_1,
buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0),
buf5, out=buf6)
del buf4
del buf5
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7
del buf7
triton_red_fused_mean_pow_1[grid(1)](buf8, buf6, 1, 4096, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del buf6
return buf8,
class DiffLossNew(nn.Module):
def __init__(self):
super(DiffLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Wang-Chuanyu/MMSA | DiffLoss | false | 5,948 | [
"MIT"
] | 1 | 2a720530c369e68656102287edb651780e827135 | https://github.com/Wang-Chuanyu/MMSA/tree/2a720530c369e68656102287edb651780e827135 | import torch
import torch.nn as nn
import torch.utils.checkpoint
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input1, input2):
batch_size = input1.size(0)
input1 = input1.view(batch_size, -1)
input2 = input2.view(batch_size, -1)
input1_mean = torch.mean(input1, dim=0, keepdims=True)
input2_mean = torch.mean(input2, dim=0, keepdims=True)
input1 = input1 - input1_mean
input2 = input2 - input2_mean
input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach()
input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-06)
input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach()
input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-06)
diff_loss = torch.mean(input1_l2.t().mm(input2_l2).pow(2))
return diff_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BBoxTransform | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6s/c6sbsxdzyxnyaoinhkjjxf63e3bablnuxukt3ntviotrgdiysvm5.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_3, %sub_2, %add_5, %add_4], 2), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp6
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp15 * tmp8
tmp17 = tmp16 * tmp11
tmp18 = tmp13 - tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 8, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + ((4*((-4) + x0)) + (16*x1)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (2 + (4*((-4) + x0)) + (16*x1)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr1 + ((4*((-4) + x0)) + (16*x1)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 - tmp27
tmp29 = tmp25 * tmp28
tmp30 = tmp27 + tmp26
tmp31 = tmp30 * tmp11
tmp32 = tmp29 + tmp31
tmp33 = tl.load(in_ptr0 + (2 + (4*((-4) + x0)) + (16*x1)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 * tmp28
tmp36 = tmp35 * tmp11
tmp37 = tmp32 - tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp24, tmp37, tmp38)
tmp40 = tmp0 >= tmp22
tmp41 = tl.full([1], 12, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tl.load(in_ptr0 + (1 + (4*((-8) + x0)) + (16*x1)), tmp43 & xmask, eviction_policy='evict_last', other=0.0)
tmp45 = tl.load(in_ptr1 + (3 + (4*((-8) + x0)) + (16*x1)), tmp43 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (1 + (4*((-8) + x0)) + (16*x1)), tmp43 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tmp45 - tmp46
tmp48 = tmp44 * tmp47
tmp49 = tmp46 + tmp45
tmp50 = tmp49 * tmp11
tmp51 = tmp48 + tmp50
tmp52 = tl.load(in_ptr0 + (3 + (4*((-8) + x0)) + (16*x1)), tmp43 & xmask, eviction_policy='evict_last', other=0.0)
tmp53 = tl_math.exp(tmp52)
tmp54 = tmp53 * tmp47
tmp55 = tmp54 * tmp11
tmp56 = tmp51 + tmp55
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp43, tmp56, tmp57)
tmp59 = tmp0 >= tmp41
tmp60 = tl.full([1], 16, tl.int64)
tmp61 = tmp0 < tmp60
tmp62 = tl.load(in_ptr0 + ((4*((-12) + x0)) + (16*x1)), tmp59 & xmask, eviction_policy='evict_last', other=0.0)
tmp63 = tl.load(in_ptr1 + (2 + (4*((-12) + x0)) + (16*x1)), tmp59 & xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.load(in_ptr1 + ((4*((-12) + x0)) + (16*x1)), tmp59 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tmp63 - tmp64
tmp66 = tmp62 * tmp65
tmp67 = tmp64 + tmp63
tmp68 = tmp67 * tmp11
tmp69 = tmp66 + tmp68
tmp70 = tl.load(in_ptr0 + (2 + (4*((-12) + x0)) + (16*x1)), tmp59 & xmask, eviction_policy='evict_last', other=0.0)
tmp71 = tl_math.exp(tmp70)
tmp72 = tmp71 * tmp65
tmp73 = tmp72 * tmp11
tmp74 = tmp69 + tmp73
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp59, tmp74, tmp75)
tmp77 = tl.where(tmp43, tmp58, tmp76)
tmp78 = tl.where(tmp24, tmp39, tmp77)
tmp79 = tl.where(tmp4, tmp20, tmp78)
tl.store(out_ptr0 + (x2), tmp79, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.onnx
class BBoxTransform(nn.Module):
def forward(self, anchors, regression):
"""
decode_box_outputs adapted from https://github.com/google/automl/blob/master/efficientdet/anchors.py
Args:
anchors: [batchsize, boxes, (y1, x1, y2, x2)]
regression: [batchsize, boxes, (dy, dx, dh, dw)]
Returns:
"""
y_centers_a = (anchors[..., 0] + anchors[..., 2]) / 2
x_centers_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
w = regression[..., 3].exp() * wa
h = regression[..., 2].exp() * ha
y_centers = regression[..., 0] * ha + y_centers_a
x_centers = regression[..., 1] * wa + x_centers_a
ymin = y_centers - h / 2.0
xmin = x_centers - w / 2.0
ymax = y_centers + h / 2.0
xmax = x_centers + w / 2.0
return torch.stack([xmin, ymin, xmax, ymax], dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
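# Worked round trip (added for illustration, not part of the original repo): all-zero
# regression offsets decode every anchor back onto itself, only reordered from
# (y1, x1, y2, x2) to (x1, y1, x2, y2).
def _bbox_transform_identity_example():
    anchors = torch.tensor([[[10.0, 20.0, 50.0, 80.0]]])  # (batch, boxes, 4) as y1, x1, y2, x2
    deltas = torch.zeros_like(anchors)  # dy = dx = dh = dw = 0
    decoded = BBoxTransform()(anchors, deltas)
    expected = torch.tensor([[[20.0, 10.0, 80.0, 50.0]]])  # x1, y1, x2, y2
    assert torch.allclose(decoded, expected)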
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp6
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp15 * tmp8
tmp17 = tmp16 * tmp11
tmp18 = tmp13 - tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 8, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (2 + 4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr1 + (4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 - tmp27
tmp29 = tmp25 * tmp28
tmp30 = tmp27 + tmp26
tmp31 = tmp30 * tmp11
tmp32 = tmp29 + tmp31
tmp33 = tl.load(in_ptr0 + (2 + 4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 * tmp28
tmp36 = tmp35 * tmp11
tmp37 = tmp32 - tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp24, tmp37, tmp38)
tmp40 = tmp0 >= tmp22
tmp41 = tl.full([1], 12, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tl.load(in_ptr0 + (1 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp45 = tl.load(in_ptr1 + (3 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (1 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp47 = tmp45 - tmp46
tmp48 = tmp44 * tmp47
tmp49 = tmp46 + tmp45
tmp50 = tmp49 * tmp11
tmp51 = tmp48 + tmp50
tmp52 = tl.load(in_ptr0 + (3 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp53 = tl_math.exp(tmp52)
tmp54 = tmp53 * tmp47
tmp55 = tmp54 * tmp11
tmp56 = tmp51 + tmp55
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp43, tmp56, tmp57)
tmp59 = tmp0 >= tmp41
tl.full([1], 16, tl.int64)
tmp62 = tl.load(in_ptr0 + (4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp63 = tl.load(in_ptr1 + (2 + 4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp64 = tl.load(in_ptr1 + (4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tmp63 - tmp64
tmp66 = tmp62 * tmp65
tmp67 = tmp64 + tmp63
tmp68 = tmp67 * tmp11
tmp69 = tmp66 + tmp68
tmp70 = tl.load(in_ptr0 + (2 + 4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp71 = tl_math.exp(tmp70)
tmp72 = tmp71 * tmp65
tmp73 = tmp72 * tmp11
tmp74 = tmp69 + tmp73
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp59, tmp74, tmp75)
tmp77 = tl.where(tmp43, tmp58, tmp76)
tmp78 = tl.where(tmp24, tmp39, tmp77)
tmp79 = tl.where(tmp4, tmp20, tmp78)
tl.store(out_ptr0 + x2, tmp79, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(256)](arg1_1, arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class BBoxTransformNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Wabinab/eye_of_ml | BBoxTransform | false | 5,949 | [
"Apache-2.0"
] | 1 | 9c475ddf4e56d84bc5a23d871d59169bc6061ab0 | https://github.com/Wabinab/eye_of_ml/tree/9c475ddf4e56d84bc5a23d871d59169bc6061ab0 | import torch
from torch import nn
import torch.onnx
class Model(nn.Module):
def forward(self, anchors, regression):
"""
decode_box_outputs adapted from https://github.com/google/automl/blob/master/efficientdet/anchors.py
Args:
anchors: [batchsize, boxes, (y1, x1, y2, x2)]
regression: [batchsize, boxes, (dy, dx, dh, dw)]
Returns:
"""
y_centers_a = (anchors[..., 0] + anchors[..., 2]) / 2
x_centers_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
w = regression[..., 3].exp() * wa
h = regression[..., 2].exp() * ha
y_centers = regression[..., 0] * ha + y_centers_a
x_centers = regression[..., 1] * wa + x_centers_a
ymin = y_centers - h / 2.0
xmin = x_centers - w / 2.0
ymax = y_centers + h / 2.0
xmax = x_centers + w / 2.0
return torch.stack([xmin, ymin, xmax, ymax], dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6a/c6at7k6mlvvyb6bhxconu56vmlljdfsi5u47zlmh7d3irv3tnmhl.py
# Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# diffs => add
# mse => div
# neg => neg
# pow_1 => pow_1
# sum_1 => sum_1
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {})
triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
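    # 0.00390625 == 1/256, the element count of the fixed (4, 4, 4, 4) input baked in at compile time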
tmp8 = 0.00390625
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class MSE(nn.Module):
def __init__(self):
super(MSE, self).__init__()
def forward(self, pred, real):
diffs = torch.add(real, -pred)
n = torch.numel(diffs.data)
mse = torch.sum(diffs.pow(2)) / n
return mse
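# Sanity check with hypothetical inputs: MSE()(torch.zeros(2, 3), torch.ones(2, 3)) evaluates to tensor(1.)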
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
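    # 0.00390625 == 1/256, the element count of the fixed (4, 4, 4, 4) input baked in at compile time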
tmp8 = 0.00390625
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSENew(nn.Module):
def __init__(self):
super(MSENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Wang-Chuanyu/MMSA | MSE | false | 5,950 | [
"MIT"
] | 1 | 2a720530c369e68656102287edb651780e827135 | https://github.com/Wang-Chuanyu/MMSA/tree/2a720530c369e68656102287edb651780e827135 | import torch
import torch.nn as nn
import torch.utils.checkpoint
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, real):
diffs = torch.add(real, -pred)
n = torch.numel(diffs.data)
mse = torch.sum(diffs.pow(2)) / n
return mse
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AdditiveAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/db/cdb6orpzcjvcc65ejodjhttcjxgr4iaztip7m5hi5zybnxdyw27e.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
# Source node to ATen node mapping:
# linear => mm
# Graph fragment:
# %mm : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze, %permute), kwargs = {})
triton_poi_fused_mm_0 = async_compile.triton('triton_poi_fused_mm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bm/cbmesc6weagqk622czwasnzu5epon5cc5ovn5t6hofhjxwhoexav.py
# Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm]
# Source node to ATen node mapping:
# linear_9 => mm_9
# Graph fragment:
# %mm_9 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze_3, %permute), kwargs = {})
triton_poi_fused_mm_1 = async_compile.triton('triton_poi_fused_mm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cr/ccrdnki3esestiav3wzifbxs2dsp6qm4xynr2k7mndgeh24ksu6u.py
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
# Source node to ATen node mapping:
# linear_3 => mm_3
# Graph fragment:
# %mm_3 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze_1, %permute), kwargs = {})
triton_poi_fused_mm_2 = async_compile.triton('triton_poi_fused_mm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yj/cyjwsywo4vpgjfloa75r4g2nwy5yybh2megkzl5ajpdfnreuuwdz.py
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
# Source node to ATen node mapping:
# linear_6 => mm_6
# Graph fragment:
# %mm_6 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze_2, %permute), kwargs = {})
triton_poi_fused_mm_3 = async_compile.triton('triton_poi_fused_mm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mm_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gs/cgstfojllbpw5k2ophtflxm3yxw6rgbrfltujfdb4ygg2ij6pi2x.py
# Topologically Sorted Source Nodes: [add, tanh, add_1, tanh_1, add_2, tanh_2, add_3, tanh_3], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# tanh => tanh
# tanh_1 => tanh_1
# tanh_2 => tanh_2
# tanh_3 => tanh_3
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %mm_1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %mm_1), kwargs = {})
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %mm_1), kwargs = {})
# %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_3, %mm_1), kwargs = {})
# %tanh_3 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {})
triton_poi_fused_add_tanh_4 = async_compile.triton('triton_poi_fused_add_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp5 = tmp4 + tmp1
tmp6 = libdevice.tanh(tmp5)
tmp8 = tmp7 + tmp1
tmp9 = libdevice.tanh(tmp8)
tmp11 = tmp10 + tmp1
tmp12 = libdevice.tanh(tmp11)
tl.store(out_ptr0 + (x2), tmp3, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
tl.store(out_ptr2 + (x2), tmp9, xmask)
tl.store(out_ptr3 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xc/cxc5doccwebfyrprs4yc5eszfjx732wl6fqnr3rgnt2avrxpjv6g.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%cat_mm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat_mm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
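    # numerically stable softmax, pass 1: exponentiate after subtracting the per-row max of the 4 scores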
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cn/ccnuca5xfcii44dqwy7ozzizo6jkf7qvyd2gmp6ieo5azn6ggycd.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
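    # softmax, pass 2: normalize by the per-row sum of the exponentials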
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lp/clp73dcmr6zqgcaipqcinn4bpx3557brwflwcrr2th4qc242xqyi.py
# Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# final_context_vec => sum_2
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_4, %primals_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_7 = async_compile.triton('triton_poi_fused_mul_sum_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
stream0 = get_raw_stream(0)
triton_poi_fused_mm_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
del primals_3
buf10 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm]
triton_poi_fused_mm_1.run(primals_1, buf10, 4, grid=grid(4), stream=stream0)
buf11 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm]
extern_kernels.mm(buf10, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf11)
buf4 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
triton_poi_fused_mm_2.run(primals_1, buf4, 4, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf5)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
triton_poi_fused_mm_3.run(primals_1, buf7, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
extern_kernels.mm(buf7, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf8)
del buf7
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, tanh, add_1, tanh_1, add_2, tanh_2, add_3, tanh_3], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_4.run(buf1, buf2, buf5, buf8, buf11, buf3, buf6, buf9, buf12, 16, grid=grid(16), stream=stream0)
del buf1
del buf11
del buf5
del buf8
buf17 = buf2; del buf2 # reuse
buf13 = reinterpret_tensor(buf17, (4, 1), (4, 1), 0) # alias
# Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat]
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf13)
buf14 = reinterpret_tensor(buf17, (4, 1), (4, 1), 1) # alias
# Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat]
extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf14)
buf15 = reinterpret_tensor(buf17, (4, 1), (4, 1), 2) # alias
# Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat]
extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf17, (4, 1), (4, 1), 3) # alias
# Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat]
extern_kernels.mm(buf12, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf16)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf17, buf18, 16, grid=grid(16), stream=stream0)
del buf13
del buf14
del buf15
del buf16
buf19 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf18, buf19, 16, grid=grid(16), stream=stream0)
buf20 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_7.run(buf19, primals_1, buf20, 16, grid=grid(16), stream=stream0)
return (buf20, reinterpret_tensor(buf19, (4, 4, 1), (4, 1, 1), 0), primals_1, primals_4, reinterpret_tensor(primals_1, (1, 4), (16, 4), 0), buf3, reinterpret_tensor(primals_1, (1, 4), (16, 4), 1), buf6, reinterpret_tensor(primals_1, (1, 4), (16, 4), 2), buf9, reinterpret_tensor(primals_1, (1, 4), (16, 4), 3), buf12, buf19, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AdditiveAttention(nn.Module):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super(AdditiveAttention, self).__init__()
if internal_dim is None:
internal_dim = int((encoder_hidden_state_dim +
decoder_hidden_state_dim) / 2)
self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
self.v = nn.Linear(internal_dim, 1, bias=False)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + self.w2(
decoder_state)))
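    # Bahdanau-style additive attention score: v^T tanh(W1 @ encoder_state + W2 @ decoder_state)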
def forward(self, encoder_states, decoder_state):
score_vec = torch.cat([self.score(encoder_states[:, i],
decoder_state) for i in range(encoder_states.shape[1])], dim=1)
attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, attention_probs
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'encoder_hidden_state_dim': 4, 'decoder_hidden_state_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_mm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_mm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_mm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
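    # fused add + tanh: tanh(w1(encoder_state_i) + w2(decoder_state)) for all four encoder timesteps in one pass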
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp5 = tmp4 + tmp1
tmp6 = libdevice.tanh(tmp5)
tmp8 = tmp7 + tmp1
tmp9 = libdevice.tanh(tmp8)
tmp11 = tmp10 + tmp1
tmp12 = libdevice.tanh(tmp11)
tl.store(out_ptr0 + x2, tmp3, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
tl.store(out_ptr2 + x2, tmp9, xmask)
tl.store(out_ptr3 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
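    # numerically stable softmax, pass 1: exponentiate after subtracting the per-row max of the 4 scores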
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
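    # softmax, pass 2: normalize by the per-row sum of the exponentials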
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mm_0[grid(4)](primals_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4
), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf2)
del primals_3
buf10 = buf0
del buf0
triton_poi_fused_mm_1[grid(4)](primals_1, buf10, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_2, (4, 4), (1,
4), 0), out=buf11)
buf4 = buf10
del buf10
triton_poi_fused_mm_2[grid(4)](primals_1, buf4, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (4, 4), (1, 4
), 0), out=buf5)
buf7 = buf4
del buf4
triton_poi_fused_mm_3[grid(4)](primals_1, buf7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_2, (4, 4), (1, 4
), 0), out=buf8)
del buf7
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_tanh_4[grid(16)](buf1, buf2, buf5, buf8, buf11,
buf3, buf6, buf9, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf11
del buf5
del buf8
buf17 = buf2
del buf2
buf13 = reinterpret_tensor(buf17, (4, 1), (4, 1), 0)
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (4, 1), (1, 4
), 0), out=buf13)
buf14 = reinterpret_tensor(buf17, (4, 1), (4, 1), 1)
extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (4, 1), (1, 4
), 0), out=buf14)
buf15 = reinterpret_tensor(buf17, (4, 1), (4, 1), 2)
extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 1), (1, 4
), 0), out=buf15)
buf16 = reinterpret_tensor(buf17, (4, 1), (4, 1), 3)
extern_kernels.mm(buf12, reinterpret_tensor(primals_5, (4, 1), (1,
4), 0), out=buf16)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(16)](buf17, buf18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del buf15
del buf16
buf19 = buf17
del buf17
triton_poi_fused__softmax_6[grid(16)](buf18, buf19, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf20 = buf18
del buf18
triton_poi_fused_mul_sum_7[grid(16)](buf19, primals_1, buf20, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf20, reinterpret_tensor(buf19, (4, 4, 1), (4, 1, 1), 0
), primals_1, primals_4, reinterpret_tensor(primals_1, (1, 4), (16,
4), 0), buf3, reinterpret_tensor(primals_1, (1, 4), (16, 4), 1
), buf6, reinterpret_tensor(primals_1, (1, 4), (16, 4), 2
), buf9, reinterpret_tensor(primals_1, (1, 4), (16, 4), 3
), buf12, buf19, primals_5
class AdditiveAttentionNew(nn.Module):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super(AdditiveAttentionNew, self).__init__()
if internal_dim is None:
internal_dim = int((encoder_hidden_state_dim +
decoder_hidden_state_dim) / 2)
self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
self.v = nn.Linear(internal_dim, 1, bias=False)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + self.w2(
decoder_state)))
def forward(self, input_0, input_1):
primals_1 = self.w1.weight
primals_2 = self.w2.weight
primals_5 = self.v.weight
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| Vision-CAIR/UnlikelihoodMotionForecasting | AdditiveAttention | false | 5,951 | [
"MIT"
] | 1 | 556d6a3ed3e4e0e2d88108d7dbb48933313b58aa | https://github.com/Vision-CAIR/UnlikelihoodMotionForecasting/tree/556d6a3ed3e4e0e2d88108d7dbb48933313b58aa | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
internal_dim=None):
super().__init__()
if internal_dim is None:
internal_dim = int((encoder_hidden_state_dim +
decoder_hidden_state_dim) / 2)
self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
self.v = nn.Linear(internal_dim, 1, bias=False)
def score(self, encoder_state, decoder_state):
return self.v(torch.tanh(self.w1(encoder_state) + self.w2(
decoder_state)))
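    # Bahdanau-style additive attention score: v^T tanh(W1 @ encoder_state + W2 @ decoder_state)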
def forward(self, encoder_states, decoder_state):
score_vec = torch.cat([self.score(encoder_states[:, i],
decoder_state) for i in range(encoder_states.shape[1])], dim=1)
attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2)
final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
return final_context_vec, attention_probs
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
CapsuleLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fg/cfgncfnc3vamedzi7zp5gtjfdb47sqygzojfulisfasnwnsnks4j.py
# Topologically Sorted Source Nodes: [sub, relu, mul, sub_1, mul_1, sub_2, relu_1, mul_2, add, class_loss], Original ATen: [aten.rsub, aten.relu, aten.mul, aten.sub, aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# class_loss => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# relu => relu
# relu_1 => relu_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.9, %arg0_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %relu), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 0.5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %relu_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
triton_per_fused_add_mean_mul_relu_rsub_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_relu_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_relu_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_relu_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
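    # tmp0 = target, tmp1 = output; fuses target*relu(0.9 - output) + 0.5*(1 - target)*relu(output - 0.1) and the mean reduction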
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 * tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp0
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = 0.1
tmp12 = tmp1 - tmp11
tmp13 = triton_helpers.maximum(tmp4, tmp12)
tmp14 = tmp10 * tmp13
tmp15 = tmp6 + tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 256.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, relu, mul, sub_1, mul_1, sub_2, relu_1, mul_2, add, class_loss], Original ATen: [aten.rsub, aten.relu, aten.mul, aten.sub, aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_relu_rsub_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class CapsuleLoss(nn.Module):
def __init__(self):
super(CapsuleLoss, self).__init__()
def forward(self, output, target):
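        # CapsNet-style margin loss: m+ = 0.9, m- = 0.1, down-weight 0.5 for absent classes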
class_loss = (target * F.relu(0.9 - output) + 0.5 * (1 - target) *
F.relu(output - 0.1)).mean()
return class_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_relu_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
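    # tmp0 = target, tmp1 = output; fuses target*relu(0.9 - output) + 0.5*(1 - target)*relu(output - 0.1) and the mean reduction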
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 * tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp0
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = 0.1
tmp12 = tmp1 - tmp11
tmp13 = triton_helpers.maximum(tmp4, tmp12)
tmp14 = tmp10 * tmp13
tmp15 = tmp6 + tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 256.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_relu_rsub_sub_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class CapsuleLossNew(nn.Module):
def __init__(self):
super(CapsuleLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | CapsuleLoss | false | 5,952 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
class_loss = (target * F.relu(0.9 - output) + 0.5 * (1 - target) *
F.relu(output - 0.1)).mean()
return class_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DDPGConvBody | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/te/ctesp4dgksjo6vch2othyeldjyg2lawods7xb33gmgra4scypxxd.py
# Topologically Sorted Source Nodes: [conv2d, y], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# conv2d => convolution
# y => expm1, gt, mul, mul_2, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 961) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
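def _elu_decomposition_example():
    # Illustrative sketch, not part of the generated wrapper: the fused kernel
    # above adds the convolution bias and evaluates ELU(alpha=1.0) as
    # where(x > 0, x, expm1(x)), which matches torch.nn.functional.elu.
    x = torch.randn(8)
    fused = torch.where(x > 0, x, torch.expm1(x))
    return torch.allclose(fused, torch.nn.functional.elu(x))  # True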
# kernel path: runs/run_shard_4/inductor_cache/fe/cfeyjg2ieifb4duknw7wq5djcxmmx64rp7mmvtqracnkw3qgb3ny.py
# Topologically Sorted Source Nodes: [conv2d_1, y_1], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# y_1 => expm1_1, gt_1, mul_3, mul_5, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {})
# %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {})
triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 107648
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 841) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 31, 31), (30752, 961, 31, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, y], Original ATen: [aten.convolution, aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 123008, grid=grid(123008), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 29, 29), (26912, 841, 29, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 32, 29, 29), (26912, 841, 29, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, y_1], Original ATen: [aten.convolution, aten.elu]
triton_poi_fused_convolution_elu_1.run(buf4, primals_5, buf5, 107648, grid=grid(107648), stream=stream0)
del primals_5
return (reinterpret_tensor(buf5, (4, 26912), (26912, 1), 0), primals_1, primals_3, primals_4, buf1, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def layer_init(layer, w_scale=1.0):
nn.init.orthogonal_(layer.weight.data)
layer.weight.data.mul_(w_scale)
nn.init.constant_(layer.bias.data, 0)
return layer
class DDPGConvBody(nn.Module):
def __init__(self, in_channels=4):
super(DDPGConvBody, self).__init__()
self.feature_dim = 39 * 39 * 32
self.conv1 = layer_init(nn.Conv2d(in_channels, 32, kernel_size=3,
stride=2))
self.conv2 = layer_init(nn.Conv2d(32, 32, kernel_size=3))
def forward(self, x):
y = F.elu(self.conv1(x))
y = F.elu(self.conv2(y))
y = y.view(y.size(0), -1)
return y
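def _conv_body_shape_example():
    # Illustrative sketch, not part of the original repo: for the 4x4x64x64
    # test input used here, conv1 (kernel 3, stride 2) produces 31x31 maps and
    # conv2 (kernel 3) produces 29x29, so the flattened output has
    # 32 * 29 * 29 = 26912 features per sample; the hard-coded feature_dim of
    # 39 * 39 * 32 corresponds to a larger input resolution.
    body = DDPGConvBody(in_channels=4)
    y = body(torch.rand(4, 4, 64, 64))
    return y.shape  # torch.Size([4, 26912])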
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 107648
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 31, 31), (30752, 961, 31, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 32, 31, 31), (30752, 961, 31, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(123008)](buf1, primals_2,
buf2, 123008, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 29, 29), (26912, 841, 29, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 32, 29, 29), (26912, 841, 29, 1),
torch.float32)
triton_poi_fused_convolution_elu_1[grid(107648)](buf4, primals_5,
buf5, 107648, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
return reinterpret_tensor(buf5, (4, 26912), (26912, 1), 0
), primals_1, primals_3, primals_4, buf1, buf2, buf4
def layer_init(layer, w_scale=1.0):
nn.init.orthogonal_(layer.weight.data)
layer.weight.data.mul_(w_scale)
nn.init.constant_(layer.bias.data, 0)
return layer
class DDPGConvBodyNew(nn.Module):
def __init__(self, in_channels=4):
super(DDPGConvBodyNew, self).__init__()
self.feature_dim = 39 * 39 * 32
self.conv1 = layer_init(nn.Conv2d(in_channels, 32, kernel_size=3,
stride=2))
self.conv2 = layer_init(nn.Conv2d(32, 32, kernel_size=3))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Sohojoe/UdacityDeepRL-Project2 | DDPGConvBody | false | 5,953 | [
"MIT"
] | 1 | 7137eea0b606ea32d00424d23130ff213f03ecf1 | https://github.com/Sohojoe/UdacityDeepRL-Project2/tree/7137eea0b606ea32d00424d23130ff213f03ecf1 | import torch
import torch.nn as nn
import torch.nn.functional as F
def layer_init(layer, w_scale=1.0):
nn.init.orthogonal_(layer.weight.data)
layer.weight.data.mul_(w_scale)
nn.init.constant_(layer.bias.data, 0)
return layer
class Model(nn.Module):
def __init__(self, in_channels=4):
super().__init__()
self.feature_dim = 39 * 39 * 32
self.conv1 = layer_init(nn.Conv2d(in_channels, 32, kernel_size=3,
stride=2))
self.conv2 = layer_init(nn.Conv2d(32, 32, kernel_size=3))
def forward(self, x):
y = F.elu(self.conv1(x))
y = F.elu(self.conv2(y))
y = y.view(y.size(0), -1)
return y
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return []
|
CustomKLLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gp/cgp2oconuvby26d6uksthbr3wh55rudltimi2v7mlyf2hhdppq42.py
# Topologically Sorted Source Nodes: [mul, mean, mul_1, mean_1, add, mul_2, log, mean_2, sub, sub_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.log, aten.sub]
# Source node to ATen node mapping:
# add => add
# log => log
# mean => mean
# mean_1 => mean_1
# mean_2 => mean_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg1_1), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg1_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul_2,), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, 1), kwargs = {})
triton_per_fused_add_log_mean_mul_sub_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl_math.log(tmp6)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp4 / tmp14
tmp16 = tmp9 / tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp13 / tmp14
tmp19 = tmp17 - tmp18
tmp20 = 1.0
tmp21 = tmp19 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mean, mul_1, mean_1, add, mul_2, log, mean_2, sub, sub_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.log, aten.sub]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mean_mul_sub_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
class CustomKLLoss(_Loss):
"""
KL_Loss = (|dot(mean , mean)| + |dot(std, std)| - |log(dot(std, std))| - 1) / N
N is the total number of image voxels
"""
def __init__(self, *args, **kwargs):
super(CustomKLLoss, self).__init__()
def forward(self, mean, std):
return torch.mean(torch.mul(mean, mean)) + torch.mean(torch.mul(std,
std)) - torch.mean(torch.log(torch.mul(std, std))) - 1
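def _kl_sanity_check():
    # Illustrative sketch, not part of the original repo: the forward above
    # computes mean(mu^2) + mean(sigma^2) - mean(log(sigma^2)) - 1, so for
    # mu = 0 and sigma = 1 it returns 0 + 1 - 0 - 1 = 0, the KL divergence
    # between N(0, 1) and the standard normal prior.
    mean = torch.zeros(4, 4, 4, 4)
    std = torch.ones(4, 4, 4, 4)
    return CustomKLLoss()(mean, std)  # tensor(0.)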
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl_math.log(tmp6)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp4 / tmp14
tmp16 = tmp9 / tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp13 / tmp14
tmp19 = tmp17 - tmp18
tmp20 = 1.0
tmp21 = tmp19 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_sub_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class CustomKLLossNew(_Loss):
"""
KL_Loss = (|dot(mean , mean)| + |dot(std, std)| - |log(dot(std, std))| - 1) / N
N is the total number of image voxels
"""
def __init__(self, *args, **kwargs):
super(CustomKLLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | CustomKLLoss | false | 5,954 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch.nn.modules.loss import _Loss
class Model(_Loss):
"""
KL_Loss = (|dot(mean , mean)| + |dot(std, std)| - |log(dot(std, std))| - 1) / N
N is the total number of image voxels
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, mean, std):
return torch.mean(torch.mul(mean, mean)) + torch.mean(torch.mul(std,
std)) - torch.mean(torch.log(torch.mul(std, std))) - 1
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SIMSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yv/cyvtugm5zmfyef6ozbsgz6j663e2vlhm4figzcm4sgsvgkz2jjx5.py
# Topologically Sorted Source Nodes: [neg, diffs, sum_1, pow_1, simse], Original ATen: [aten.neg, aten.add, aten.sum, aten.pow, aten.div]
# Source node to ATen node mapping:
# diffs => add
# neg => neg
# pow_1 => pow_1
# simse => div
# sum_1 => sum_1
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, 65536), kwargs = {})
triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6 * tmp6
tmp8 = 1.52587890625e-05
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diffs, sum_1, pow_1, simse], Original ATen: [aten.neg, aten.add, aten.sum, aten.pow, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class SIMSE(nn.Module):
def __init__(self):
super(SIMSE, self).__init__()
def forward(self, pred, real):
diffs = torch.add(real, -pred)
n = torch.numel(diffs.data)
simse = torch.sum(diffs).pow(2) / n ** 2
return simse
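def _simse_example():
    # Illustrative sketch, not part of the original repo: SIMSE squares the
    # sum of signed residuals before dividing by n^2, so a constant offset c
    # over n elements yields (n * c)^2 / n^2 = c^2 regardless of n.
    pred = torch.zeros(2, 2)
    real = torch.full((2, 2), 0.5)
    return SIMSE()(pred, real)  # tensor(0.2500)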
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6 * tmp6
tmp8 = 1.52587890625e-05
tmp9 = tmp7 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SIMSENew(nn.Module):
def __init__(self):
super(SIMSENew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Wang-Chuanyu/MMSA | SIMSE | false | 5,955 | [
"MIT"
] | 1 | 2a720530c369e68656102287edb651780e827135 | https://github.com/Wang-Chuanyu/MMSA/tree/2a720530c369e68656102287edb651780e827135 | import torch
import torch.nn as nn
import torch.utils.checkpoint
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, real):
diffs = torch.add(real, -pred)
n = torch.numel(diffs.data)
simse = torch.sum(diffs).pow(2) / n ** 2
return simse
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GridAttentionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yg/cygmdn2auc3tes3j4f4vkac3dflpmrpbzuwybdwsm7q2cg5n4bbd.py
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# phi_g => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7n/c7nzf22cnpbzyd2j2wkts5chwjan7tdk7qdk4mpjozf4w4f6h2mb.py
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# phi_g => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 3), kwargs = {})
triton_poi_fused_add_clamp_1 = async_compile.triton('triton_poi_fused_add_clamp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ix/cixcyqvwbk6rxsv5l4regwkl4tvvkxddkkv7q5vh5eqteaen7y4v.py
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# phi_g => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = triton_helpers.minimum(tmp12, tmp4)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
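def _bilinear_index_example():
    # Illustrative sketch, not part of the generated wrapper: the three small
    # kernels above reproduce the align_corners=False source-index math of
    # bilinear upsampling for a 4 -> 4 resize (scale 1.0). The source
    # coordinate is max((x + 0.5) * 1.0 - 0.5, 0); its truncation gives the
    # low index, the clamped low + 1 the high index, and the clamped
    # fractional remainder the interpolation weight.
    x = torch.arange(4, dtype=torch.float32)
    src = ((x + 0.5) * 1.0 - 0.5).clamp(min=0.0)
    low = src.to(torch.int64)
    high = (low + 1).clamp(max=3)
    frac = (src - low.to(torch.float32)).clamp(0.0, 1.0)
    return low, high, frac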
# kernel path: runs/run_shard_4/inductor_cache/v3/cv3ab6upk47hyclwxrouatwmywwvfnmb6wfdb6utqizg76a6vhxg.py
# Topologically Sorted Source Nodes: [theta_x, conv2d_1, phi_g, add, f], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add, aten.relu]
# Source node to ATen node mapping:
# add => add_7
# conv2d_1 => convolution_1
# f => relu
# phi_g => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_2, mul_3, mul_4, sub_3, sub_4, sub_6
# theta_x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %add_6), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_7,), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x5 = (xindex // 16)
x2 = (xindex // 16) % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x6), xmask)
tmp37 = tl.load(in_ptr8 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (4*tmp25) + (16*x5)), xmask, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (4*tmp25) + (16*x5)), xmask, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp38 = tmp36 + tmp37
tmp39 = tmp21 + tmp35
tmp40 = tmp38 + tmp39
tmp41 = tl.full([1], 0, tl.int32)
tmp42 = triton_helpers.maximum(tmp41, tmp40)
tl.store(in_out_ptr0 + (x6), tmp42, xmask)
''', device_str='cuda')
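def _bilinear_lerp_example():
    # Illustrative sketch, not part of the generated wrapper: the fused kernel
    # above gathers the four corner values of phi_g and blends them with the
    # horizontal weight wx and vertical weight wy as a separable linear
    # interpolation, then adds the theta_x branch and applies ReLU.
    v00, v01, v10, v11 = 1.0, 2.0, 3.0, 4.0
    wx, wy = 0.25, 0.5
    top = v00 + (v01 - v00) * wx
    bottom = v10 + (v11 - v10) * wx
    return top + (bottom - top) * wy  # 2.25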
# kernel path: runs/run_shard_4/inductor_cache/mf/cmf6jsm5hziodzx7nnkrbjmuzqmfryrki35b7ggnsadoofxr2pic.py
# Topologically Sorted Source Nodes: [conv2d_2, sigm_psi_f], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# sigm_psi_f => sigmoid
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_7, %primals_8, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_sigmoid_4 = async_compile.triton('triton_poi_fused_convolution_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [theta_x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_1.run(buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_0.run(buf4, 4, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_1.run(buf5, 4, grid=grid(4), stream=stream0)
buf6 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2.run(buf6, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [phi_g], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2.run(buf8, 4, grid=grid(4), stream=stream0)
buf10 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [theta_x, conv2d_1, phi_g, add, f], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add, aten.relu]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3.run(buf10, buf2, buf4, buf1, primals_6, buf5, buf6, buf3, buf8, primals_4, 256, grid=grid(256), stream=stream0)
del buf1
del primals_4
del primals_6
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 4, 4), (16, 16, 4, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, sigm_psi_f], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_4.run(buf12, primals_8, 64, grid=grid(64), stream=stream0)
del primals_8
return (buf12, primals_1, primals_2, primals_3, primals_5, primals_7, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class GridAttentionBlock(nn.Module):
def __init__(self, in_channels):
super(GridAttentionBlock, self).__init__()
self.inter_channels = in_channels
self.in_channels = in_channels
self.gating_channels = in_channels
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1)
self.phi = nn.Conv2d(in_channels=self.gating_channels, out_channels
=self.inter_channels, kernel_size=1, stride=1, padding=0, bias=True
)
self.psi = nn.Conv2d(in_channels=self.inter_channels, out_channels=
1, kernel_size=1, stride=1, padding=0, bias=True)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=
'bilinear')
f = F.relu(theta_x + phi_g, inplace=True)
sigm_psi_f = torch.sigmoid(self.psi(f))
return sigm_psi_f
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
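# Minimal usage sketch (an illustrative addition, not part of the original repo):
# the gate produced for a (B, C, H, W) pair of feature/gating maps has a single
# channel and the same spatial size as theta(x).
if __name__ == "__main__":
    block = GridAttentionBlock(in_channels=4)
    x, g = get_inputs()
    gate = block(x, g)
    assert gate.shape == (4, 1, 4, 4)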
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = triton_helpers.minimum(tmp12, tmp4)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x5 = xindex // 16
x2 = xindex // 16 % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + x6, xmask)
tmp37 = tl.load(in_ptr8 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 4 * tmp4 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 4 * tmp25 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 4 * tmp25 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp38 = tmp36 + tmp37
tmp39 = tmp21 + tmp35
tmp40 = tmp38 + tmp39
tmp41 = tl.full([1], 0, tl.int32)
tmp42 = triton_helpers.maximum(tmp41, tmp40)
tl.store(in_out_ptr0 + x6, tmp42, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_2, primals_5, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(4)](buf2, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=
1, num_stages=1)
buf4 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_0[grid(4)](buf4, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf5 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_add_clamp_1[grid(4)](buf5, 4, XBLOCK=4, num_warps=
1, num_stages=1)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(4)](buf6,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(4)](buf8,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf10 = buf0
del buf0
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_3[grid(256)
](buf10, buf2, buf4, buf1, primals_6, buf5, buf6, buf3, buf8,
primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_4
del primals_6
buf11 = extern_kernels.convolution(buf10, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 4, 4), (16, 16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_sigmoid_4[grid(64)](buf12, primals_8,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
return (buf12, primals_1, primals_2, primals_3, primals_5, primals_7,
buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf12)
class GridAttentionBlockNew(nn.Module):
def __init__(self, in_channels):
super(GridAttentionBlockNew, self).__init__()
self.inter_channels = in_channels
self.in_channels = in_channels
self.gating_channels = in_channels
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1)
self.phi = nn.Conv2d(in_channels=self.gating_channels, out_channels
=self.inter_channels, kernel_size=1, stride=1, padding=0, bias=True
)
self.psi = nn.Conv2d(in_channels=self.inter_channels, out_channels=
1, kernel_size=1, stride=1, padding=0, bias=True)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0, input_1):
primals_3 = self.theta.weight
primals_4 = self.theta.bias
primals_5 = self.phi.weight
primals_6 = self.phi.bias
primals_7 = self.psi.weight
primals_8 = self.psi.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| WHU-YH-jx/bionetwork_segmentation | GridAttentionBlock | false | 5,956 | [
"MIT"
] | 1 | 556c5b61a1a3784875b31eacb8c6bb418d70ee9a | https://github.com/WHU-YH-jx/bionetwork_segmentation/tree/556c5b61a1a3784875b31eacb8c6bb418d70ee9a | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.inter_channels = in_channels
self.in_channels = in_channels
self.gating_channels = in_channels
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1)
self.phi = nn.Conv2d(in_channels=self.gating_channels, out_channels
=self.inter_channels, kernel_size=1, stride=1, padding=0, bias=True
)
self.psi = nn.Conv2d(in_channels=self.inter_channels, out_channels=
1, kernel_size=1, stride=1, padding=0, bias=True)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=
'bilinear')
f = F.relu(theta_x + phi_g, inplace=True)
sigm_psi_f = torch.sigmoid(self.psi(f))
return sigm_psi_f
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Relu_Caps | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/da/cda54wucsyoxntlaaojzjqy4xtfp4dnbmnlc7tfccok7l3lmpcqj.py
# Topologically Sorted Source Nodes: [sub, relu, mul_1, add, x_caps_1], Original ATen: [aten.sub, aten.relu, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# mul_1 => mul_1
# relu => relu
# sub => sub
# x_caps_1 => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, 0.2), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, 0.0001), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {})
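# Rough eager-mode equivalent of the fusion above (a sketch; names are illustrative):
#   length = torch.sqrt((x_caps * x_caps).sum(dim=2, keepdim=True))
#   out    = F.relu(length - 0.2) * x_caps / (length + 0.0001)
# The capsule-length reduction, thresholded ReLU, scaling and division are all
# fused into the single elementwise kernel below.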
triton_poi_fused_add_div_mul_relu_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_relu_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.2
tmp13 = tmp11 - tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = 0.0001
tmp19 = tmp11 + tmp18
tmp20 = tmp17 / tmp19
tl.store(out_ptr0 + (x3), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, relu, mul_1, add, x_caps_1], Original ATen: [aten.sub, aten.relu, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_relu_sub_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 16, 4, 4), (256, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Relu_Caps(nn.Module):
def __init__(self, num_C, num_D, theta=0.2, eps=0.0001):
super(Relu_Caps, self).__init__()
self.num_C = num_C
self.num_D = num_D
self.theta = theta
self.eps = eps
def forward(self, x):
x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.
shape[3])
x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))
x_length = torch.unsqueeze(x_length, 2)
x_caps = F.relu(x_length - self.theta) * x_caps / (x_length + self.eps)
x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_C': 4, 'num_D': 4}]
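# Minimal usage sketch (an illustrative addition, not part of the original repo):
# capsules shorter than theta are zeroed, the rest are rescaled, and the result
# is flattened back to (batch, num_C * num_D, H, W).
if __name__ == "__main__":
    caps = Relu_Caps(num_C=4, num_D=4)
    out = caps(get_inputs()[0])
    assert out.shape == (4, 16, 4, 4)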
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mul_relu_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.2
tmp13 = tmp11 - tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = 0.0001
tmp19 = tmp11 + tmp18
tmp20 = tmp17 / tmp19
tl.store(out_ptr0 + x3, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_relu_sub_0[grid(1024)](arg0_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 16, 4, 4), (256, 16, 4, 1), 0),
class Relu_CapsNew(nn.Module):
def __init__(self, num_C, num_D, theta=0.2, eps=0.0001):
super(Relu_CapsNew, self).__init__()
self.num_C = num_C
self.num_D = num_D
self.theta = theta
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | Relu_Caps | false | 5,957 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_C, num_D, theta=0.2, eps=0.0001):
super().__init__()
self.num_C = num_C
self.num_D = num_D
self.theta = theta
self.eps = eps
def forward(self, x):
x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.
shape[3])
x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))
x_length = torch.unsqueeze(x_length, 2)
x_caps = F.relu(x_length - self.theta) * x_caps / (x_length + self.eps)
x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c3/cc35rxagqvvcixjawxcetgbs56htfzstazykjhyczpil5zctftss.py
# Topologically Sorted Source Nodes: [mul, intersect, add, denominator], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# denominator => sum_2
# intersect => sum_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_4, %slice_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [-1]), kwargs = {})
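# Rough eager-mode equivalent of the fusion above (a sketch; names are illustrative):
#   p = torch.sigmoid(pred[0]).reshape(4, -1)[:, 1:]
#   t = target[0].reshape(4, -1)[:, 1:]
#   intersect   = (p * t).sum(-1)
#   denominator = (p + t).sum(-1)
# rnumel = 15 in the kernel below reflects the [:, 1:] slice of each 16-element row.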
triton_per_fused_add_mul_sum_0 = async_compile.triton('triton_per_fused_add_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 15
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + r1 + (16*x0)), rmask & xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (1 + r1 + (16*x0)), rmask & xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 + tmp2
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5i/c5igbunkwt5rtqc5m4objcih3pcqb6hetejxaqtxaz6ll2xxrvjb.py
# Topologically Sorted Source Nodes: [mul_1, clamp, per_channel_dice, sub, mean], Original ATen: [aten.mul, aten.clamp, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# clamp => clamp_min
# mean => mean
# mul_1 => mul_1
# per_channel_dice => div
# sub => sub
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_2, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %clamp_min), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
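# Rough eager-mode equivalent of the fusion above (a sketch; names are illustrative):
#   dice = 2.0 * intersect / denominator.clamp(min=1e-05)
#   loss = (1.0 - dice).mean()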
triton_per_fused_clamp_div_mean_mul_rsub_1 = async_compile.triton('triton_per_fused_clamp_div_mean_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_div_mean_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = 1e-05
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 / tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersect, add, denominator], Original ATen: [aten.mul, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, 4, 15, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [mul_1, clamp, per_channel_dice, sub, mean], Original ATen: [aten.mul, aten.clamp, aten.div, aten.rsub, aten.mean]
triton_per_fused_clamp_div_mean_mul_rsub_1.run(buf3, buf0, buf1, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.autograd import Variable
def compute_per_channel_dice(input, target, epsilon=1e-05, ignore_index=
None, weight=None):
assert input.size() == target.size(
), "'input' and 'target' must have the same shape"
if ignore_index is not None:
mask = target.clone().ne_(ignore_index)
mask.requires_grad = False
input = input * mask
target = target * mask
seg_pred = torch.reshape(input[0], [4, -1])
seg_true = torch.reshape(target[0], [4, -1])
seg_true = seg_true[:, 1:]
seg_pred = seg_pred[:, 1:]
intersect = (seg_pred * seg_true).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (seg_pred + seg_true).sum(-1)
return 2.0 * intersect / denominator.clamp(min=epsilon)
class DiceLoss(nn.Module):
"""Computes Dice Loss, which just 1 - DiceCoefficient described above.
Additionally allows per-class weights to be provided.
"""
def __init__(self, epsilon=1e-05, weight=None, ignore_index=None,
sigmoid_normalization=True, skip_last_target=False):
super(DiceLoss, self).__init__()
self.epsilon = epsilon
self.register_buffer('weight', weight)
self.ignore_index = ignore_index
if sigmoid_normalization:
self.normalization = nn.Sigmoid()
else:
self.normalization = nn.Softmax(dim=1)
self.skip_last_target = skip_last_target
def forward(self, input, target):
input = self.normalization(input)
if self.weight is not None:
weight = Variable(self.weight, requires_grad=False)
else:
weight = None
if self.skip_last_target:
target = target[:, :-1, ...]
per_channel_dice = compute_per_channel_dice(input, target, epsilon=
self.epsilon, ignore_index=self.ignore_index, weight=weight)
return torch.mean(1.0 - per_channel_dice)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
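# Minimal usage sketch (an illustrative addition, not part of the original repo):
# the loss sigmoid-normalises the prediction and returns a scalar tensor.
if __name__ == "__main__":
    criterion = DiceLoss()
    pred, target = get_inputs()
    loss = criterion(pred, target)
    assert loss.dim() == 0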
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 15
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + r1 + 16 * x0), rmask & xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (1 + r1 + 16 * x0), rmask & xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 + tmp2
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_per_fused_clamp_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = 1e-05
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 / tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
4, 15, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_clamp_div_mean_mul_rsub_1[grid(1)](buf3, buf0,
buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
def compute_per_channel_dice(input, target, epsilon=1e-05, ignore_index=
None, weight=None):
assert input.size() == target.size(
), "'input' and 'target' must have the same shape"
if ignore_index is not None:
mask = target.clone().ne_(ignore_index)
mask.requires_grad = False
input = input * mask
target = target * mask
seg_pred = torch.reshape(input[0], [4, -1])
seg_true = torch.reshape(target[0], [4, -1])
seg_true = seg_true[:, 1:]
seg_pred = seg_pred[:, 1:]
intersect = (seg_pred * seg_true).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (seg_pred + seg_true).sum(-1)
return 2.0 * intersect / denominator.clamp(min=epsilon)
class DiceLossNew(nn.Module):
"""Computes Dice Loss, which just 1 - DiceCoefficient described above.
Additionally allows per-class weights to be provided.
"""
def __init__(self, epsilon=1e-05, weight=None, ignore_index=None,
sigmoid_normalization=True, skip_last_target=False):
super(DiceLossNew, self).__init__()
self.epsilon = epsilon
self.register_buffer('weight', weight)
self.ignore_index = ignore_index
if sigmoid_normalization:
self.normalization = nn.Sigmoid()
else:
self.normalization = nn.Softmax(dim=1)
self.skip_last_target = skip_last_target
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | DiceLoss | false | 5,958 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
from torch.autograd import Variable
def compute_per_channel_dice(input, target, epsilon=1e-05, ignore_index=
None, weight=None):
assert input.size() == target.size(
), "'input' and 'target' must have the same shape"
if ignore_index is not None:
mask = target.clone().ne_(ignore_index)
mask.requires_grad = False
input = input * mask
target = target * mask
seg_pred = torch.reshape(input[0], [4, -1])
seg_true = torch.reshape(target[0], [4, -1])
seg_true = seg_true[:, 1:]
seg_pred = seg_pred[:, 1:]
intersect = (seg_pred * seg_true).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (seg_pred + seg_true).sum(-1)
return 2.0 * intersect / denominator.clamp(min=epsilon)
class Model(nn.Module):
"""Computes Dice Loss, which just 1 - DiceCoefficient described above.
Additionally allows per-class weights to be provided.
"""
def __init__(self, epsilon=1e-05, weight=None, ignore_index=None,
sigmoid_normalization=True, skip_last_target=False):
super().__init__()
self.epsilon = epsilon
self.register_buffer('weight', weight)
self.ignore_index = ignore_index
if sigmoid_normalization:
self.normalization = nn.Sigmoid()
else:
self.normalization = nn.Softmax(dim=1)
self.skip_last_target = skip_last_target
def forward(self, input, target):
input = self.normalization(input)
if self.weight is not None:
weight = Variable(self.weight, requires_grad=False)
else:
weight = None
if self.skip_last_target:
target = target[:, :-1, ...]
per_channel_dice = compute_per_channel_dice(input, target, epsilon=
self.epsilon, ignore_index=self.ignore_index, weight=weight)
return torch.mean(1.0 - per_channel_dice)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CPC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/uc/cuchgcqqjeu6jafhhyk67feqv4nze7nigvoksfs3n3ljifg6pjjn.py
# Topologically Sorted Source Nodes: [norm, x_pred_1], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# norm => pow_1, pow_2, sum_1
# x_pred_1 => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%addmm, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%addmm, %pow_2), kwargs = {})
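# Rough eager-mode equivalent of the fusion above (a sketch; names are illustrative):
#   x_pred = x_pred / x_pred.norm(dim=1, keepdim=True)
# The same kernel is launched a second time below to normalise x (primals_4).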
triton_poi_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuesqaetaqcvav2j6lxtdm7pcvvk2zxym4e32eq7jzo2vexls2p.py
# Topologically Sorted Source Nodes: [mul, pos, neg, sub, mean, nce], Original ATen: [aten.mul, aten.sum, aten.logsumexp, aten.sub, aten.mean, aten.neg]
# Source node to ATen node mapping:
# mean => mean
# mul => mul
# nce => neg
# neg => abs_1, add, amax, eq, exp, full_default, log, sub, sum_4, where
# pos => sum_3
# sub => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mm, [-1], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %amax), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1]), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %squeeze), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
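# Rough eager-mode equivalent of the fusion above (a sketch; names are illustrative):
#   pos = (x * x_pred).sum(-1)
#   neg = torch.logsumexp(x @ x_pred.t(), dim=-1)
#   nce = -(pos - neg).mean()
# The matmul itself comes from the preceding extern_kernels.mm call; this kernel
# fuses the row-wise dot products, the stabilised logsumexp and the final mean.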
triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1 = async_compile.triton('triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl_math.abs(tmp21)
tmp23 = float("inf")
tmp24 = tmp22 == tmp23
tmp25 = 0.0
tmp26 = tl.where(tmp24, tmp25, tmp21)
tmp27 = tmp15 - tmp26
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp16 - tmp26
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp18 - tmp26
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp35 = tmp20 - tmp26
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tl_math.log(tmp37)
tmp39 = tmp38 + tmp26
tmp40 = tmp14 - tmp39
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = 4.0
tmp45 = tmp43 / tmp44
tmp46 = -tmp45
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp46, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_pred], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, x_pred_1], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1, x], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_poi_fused_div_linalg_vector_norm_0.run(primals_4, buf2, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mul, pos, neg, sub, mean, nce], Original ATen: [aten.mul, aten.sum, aten.logsumexp, aten.sub, aten.mean, aten.neg]
triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1.run(buf6, buf2, buf1, buf3, 1, 4, grid=grid(1), stream=stream0)
del buf1
return (buf6, primals_3, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.checkpoint
class CPC(nn.Module):
"""
Contrastive Predictive Coding: score computation. See https://arxiv.org/pdf/1807.03748.pdf.
Args:
x_size (int): embedding size of input modality representation x
y_size (int): embedding size of input modality representation y
"""
def __init__(self, x_size, y_size, n_layers=1, activation='Tanh'):
super().__init__()
self.x_size = x_size
self.y_size = y_size
self.layers = n_layers
self.activation = getattr(nn, activation)
if n_layers == 1:
self.net = nn.Linear(in_features=y_size, out_features=x_size)
else:
net = []
for i in range(n_layers):
if i == 0:
net.append(nn.Linear(self.y_size, self.x_size))
net.append(self.activation())
else:
net.append(nn.Linear(self.x_size, self.x_size))
self.net = nn.Sequential(*net)
def forward(self, x, y):
"""Calulate the score
"""
x_pred = self.net(y)
x_pred = x_pred / x_pred.norm(dim=1, keepdim=True)
x = x / x.norm(dim=1, keepdim=True)
pos = torch.sum(x * x_pred, dim=-1)
neg = torch.logsumexp(torch.matmul(x, x_pred.t()), dim=-1)
nce = -(pos - neg).mean()
return nce
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'x_size': 4, 'y_size': 4}]
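# Minimal usage sketch (an illustrative addition, not part of the original repo):
# the module scores a batch of paired representations and returns a scalar
# InfoNCE-style loss.
if __name__ == "__main__":
    cpc = CPC(x_size=4, y_size=4)
    x, y = get_inputs()
    nce = cpc(x, y)
    assert nce.dim() == 0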
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
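# Fused NCE reduction: per row, pos = <x, x_pred> and neg = a numerically stable
# logsumexp of the corresponding similarity row; the scalar output is
# -(pos - neg).mean().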
@triton.jit
def triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl_math.abs(tmp21)
tmp23 = float('inf')
tmp24 = tmp22 == tmp23
tmp25 = 0.0
tmp26 = tl.where(tmp24, tmp25, tmp21)
tmp27 = tmp15 - tmp26
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp16 - tmp26
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp18 - tmp26
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp35 = tmp20 - tmp26
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tl_math.log(tmp37)
tmp39 = tmp38 + tmp26
tmp40 = tmp14 - tmp39
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = 4.0
tmp45 = tmp43 / tmp44
tmp46 = -tmp45
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp46, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0[grid(16)](buf0, buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_0[grid(16)](primals_4, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf3)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1[grid(1)](buf6,
buf2, buf1, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf6, primals_3, buf0, buf2, buf3
class CPCNew(nn.Module):
"""
Contrastive Predictive Coding: score computation. See https://arxiv.org/pdf/1807.03748.pdf.
Args:
x_size (int): embedding size of input modality representation x
y_size (int): embedding size of input modality representation y
"""
def __init__(self, x_size, y_size, n_layers=1, activation='Tanh'):
super().__init__()
self.x_size = x_size
self.y_size = y_size
self.layers = n_layers
self.activation = getattr(nn, activation)
if n_layers == 1:
self.net = nn.Linear(in_features=y_size, out_features=x_size)
else:
net = []
for i in range(n_layers):
if i == 0:
net.append(nn.Linear(self.y_size, self.x_size))
net.append(self.activation())
else:
net.append(nn.Linear(self.x_size, self.x_size))
self.net = nn.Sequential(*net)
def forward(self, input_0, input_1):
primals_1 = self.net.weight
primals_2 = self.net.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Wang-Chuanyu/MMSA | CPC | false | 5,959 | [
"MIT"
] | 1 | 2a720530c369e68656102287edb651780e827135 | https://github.com/Wang-Chuanyu/MMSA/tree/2a720530c369e68656102287edb651780e827135 | import torch
import torch.nn as nn
import torch.utils.checkpoint
class Model(nn.Module):
"""
Contrastive Predictive Coding: score computation. See https://arxiv.org/pdf/1807.03748.pdf.
Args:
x_size (int): embedding size of input modality representation x
y_size (int): embedding size of input modality representation y
"""
def __init__(self, x_size, y_size, n_layers=1, activation='Tanh'):
super().__init__()
self.x_size = x_size
self.y_size = y_size
self.layers = n_layers
self.activation = getattr(nn, activation)
if n_layers == 1:
self.net = nn.Linear(in_features=y_size, out_features=x_size)
else:
net = []
for i in range(n_layers):
if i == 0:
net.append(nn.Linear(self.y_size, self.x_size))
net.append(self.activation())
else:
net.append(nn.Linear(self.x_size, self.x_size))
self.net = nn.Sequential(*net)
def forward(self, x, y):
"""Calulate the score
"""
x_pred = self.net(y)
x_pred = x_pred / x_pred.norm(dim=1, keepdim=True)
x = x / x.norm(dim=1, keepdim=True)
pos = torch.sum(x * x_pred, dim=-1)
neg = torch.logsumexp(torch.matmul(x, x_pred.t()), dim=-1)
nce = -(pos - neg).mean()
return nce
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Caps_Conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pk/cpkb2mcyb2qesvmdu5p4ftvqi4wcrcxj2ewwby63ggg7slwrrxvk.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_2 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = (xindex // 4096) % 4
x2 = (xindex // 16384) % 4
x3 = (xindex // 65536)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*x2) + (16384*x1) + (65536*x3)), None)
tl.store(out_ptr0 + (x4), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/e7/ce7f7focc67g3xs64gvyppnvzfkawnwxjxgg6rypx2yodlcvwk4z.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_6 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3721
x1 = (xindex // 3721) % 4
x2 = (xindex // 14884) % 4
x3 = (xindex // 59536)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (3721*x2) + (14884*x1) + (59536*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 4, 4, 64, 64), (65536, 16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf1, 262144, grid=grid(262144), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (16, 4, 64, 64), (16384, 4096, 64, 1), 0), primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (16, 4, 61, 61), (14884, 3721, 61, 1))
buf3 = empty_strided_cuda((4, 4, 4, 61, 61), (59536, 14884, 3721, 61, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, buf3, 238144, grid=grid(238144), stream=stream0)
del buf2
return (reinterpret_tensor(buf3, (4, 16, 61, 61), (59536, 3721, 61, 1), 0), primals_1, primals_2, primals_3, reinterpret_tensor(buf1, (16, 4, 64, 64), (16384, 4096, 64, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
class Caps_Conv(nn.Module):
def __init__(self, in_C, in_D, out_C, out_D, kernel_size, stride=1,
padding=0, dilation=1, bias=False):
super(Caps_Conv, self).__init__()
self.in_C = in_C
self.in_D = in_D
self.out_C = out_C
self.out_D = out_D
self.conv_D = nn.Conv2d(in_C * in_D, in_C * out_D, 1, groups=in_C,
bias=False)
self.conv_C = nn.Conv2d(in_C, out_C, kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias)
m = self.conv_D.kernel_size[0] * self.conv_D.kernel_size[1
] * self.conv_D.out_channels
self.conv_D.weight.data.normal_(0, math.sqrt(2.0 / m))
n = self.conv_C.kernel_size[0] * self.conv_C.kernel_size[1
] * self.conv_C.out_channels
self.conv_C.weight.data.normal_(0, math.sqrt(2.0 / n))
if bias:
self.conv_C.bias.data.zero_()
def forward(self, x):
x = self.conv_D(x)
x = x.view(x.shape[0], self.in_C, self.out_D, x.shape[2], x.shape[3])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.in_C, x.shape[3], x.shape[4])
x = self.conv_C(x)
x = x.view(-1, self.out_D, self.out_C, x.shape[2], x.shape[3])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.out_C * self.out_D, x.shape[3], x.shape[4])
return x
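# --- Illustrative shape walk-through (added for this listing, not from the
# original repo; the helper name is an assumption). With the settings from
# get_init_inputs below, conv_D keeps the spatial size and conv_C (kernel 4,
# no padding) shrinks 64 -> 61, so a (4, 16, 64, 64) input maps to
# (4, out_C * out_D, 61, 61) = (4, 16, 61, 61).
def _caps_conv_shape_example():
    m = Caps_Conv(in_C=4, in_D=4, out_C=4, out_D=4, kernel_size=4)
    y = m(torch.rand(4, 16, 64, 64))
    assert y.shape == (4, 16, 61, 61)
    return y.shape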
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return [[], {'in_C': 4, 'in_D': 4, 'out_C': 4, 'out_D': 4,
'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
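# Materializes torch.transpose(x, 1, 2).contiguous() on the (4, 4, 4, 64, 64)
# capsule view produced after conv_D (swaps the two capsule axes).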
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = xindex // 4096 % 4
x2 = xindex // 16384 % 4
x3 = xindex // 65536
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x2 + 16384 * x1 + 65536 * x3), None)
tl.store(out_ptr0 + x4, tmp0, None)
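# Same transposed copy for the (4, 4, 4, 61, 61) conv_C output (swaps the out_D
# and out_C axes before the final reshape).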
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3721
x1 = xindex // 3721 % 4
x2 = xindex // 14884 % 4
x3 = xindex // 59536
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 3721 * x2 + 14884 * x1 + 59536 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 4, 4, 64, 64), (65536, 16384, 4096,
64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(262144)](buf0, buf1, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del buf0
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (16, 4,
64, 64), (16384, 4096, 64, 1), 0), primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (16, 4, 61, 61), (14884, 3721, 61, 1))
buf3 = empty_strided_cuda((4, 4, 4, 61, 61), (59536, 14884, 3721,
61, 1), torch.float32)
triton_poi_fused_clone_1[grid(238144)](buf2, buf3, 238144, XBLOCK=
512, num_warps=8, num_stages=1)
del buf2
return reinterpret_tensor(buf3, (4, 16, 61, 61), (59536, 3721, 61, 1), 0
), primals_1, primals_2, primals_3, reinterpret_tensor(buf1, (16, 4,
64, 64), (16384, 4096, 64, 1), 0)
class Caps_ConvNew(nn.Module):
def __init__(self, in_C, in_D, out_C, out_D, kernel_size, stride=1,
padding=0, dilation=1, bias=False):
super(Caps_ConvNew, self).__init__()
self.in_C = in_C
self.in_D = in_D
self.out_C = out_C
self.out_D = out_D
self.conv_D = nn.Conv2d(in_C * in_D, in_C * out_D, 1, groups=in_C,
bias=False)
self.conv_C = nn.Conv2d(in_C, out_C, kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias)
m = self.conv_D.kernel_size[0] * self.conv_D.kernel_size[1
] * self.conv_D.out_channels
self.conv_D.weight.data.normal_(0, math.sqrt(2.0 / m))
n = self.conv_C.kernel_size[0] * self.conv_C.kernel_size[1
] * self.conv_C.out_channels
self.conv_C.weight.data.normal_(0, math.sqrt(2.0 / n))
if bias:
self.conv_C.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv_D.weight
primals_3 = self.conv_C.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | Caps_Conv | false | 5,960 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import math
import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_C, in_D, out_C, out_D, kernel_size, stride=1,
padding=0, dilation=1, bias=False):
super().__init__()
self.in_C = in_C
self.in_D = in_D
self.out_C = out_C
self.out_D = out_D
self.conv_D = nn.Conv2d(in_C * in_D, in_C * out_D, 1, groups=in_C,
bias=False)
self.conv_C = nn.Conv2d(in_C, out_C, kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias)
m = self.conv_D.kernel_size[0] * self.conv_D.kernel_size[1
] * self.conv_D.out_channels
self.conv_D.weight.data.normal_(0, math.sqrt(2.0 / m))
n = self.conv_C.kernel_size[0] * self.conv_C.kernel_size[1
] * self.conv_C.out_channels
self.conv_C.weight.data.normal_(0, math.sqrt(2.0 / n))
if bias:
self.conv_C.bias.data.zero_()
def forward(self, x):
x = self.conv_D(x)
x = x.view(x.shape[0], self.in_C, self.out_D, x.shape[2], x.shape[3])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.in_C, x.shape[3], x.shape[4])
x = self.conv_C(x)
x = x.view(-1, self.out_D, self.out_C, x.shape[2], x.shape[3])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.out_C * self.out_D, x.shape[3], x.shape[4])
return x
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return [[], {'in_C': 4, 'in_D': 4, 'out_C': 4, 'out_D': 4,
'kernel_size': 4}]
|
MSEWithLogitsLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/uh/cuhlesngb5msrkia5vvfspdcw5c4qmftv75f3e2c4yfrh62n6jde.py
# Topologically Sorted Source Nodes: [sigmoid, mse_loss], Original ATen: [aten.sigmoid, aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_sigmoid_0 = async_compile.triton('triton_per_fused_mse_loss_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sigmoid, mse_loss], Original ATen: [aten.sigmoid, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import MSELoss
class MSEWithLogitsLoss(MSELoss):
"""
This loss combines a `Sigmoid` layer and the `MSELoss` in one single class.
"""
def __init__(self):
super(MSEWithLogitsLoss, self).__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, input, target):
return super().forward(self.sigmoid(input), target)
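# --- Illustrative check (added for this listing, not from the original repo;
# the helper name is an assumption): the module matches applying torch.sigmoid
# first and then a plain nn.MSELoss.
def _mse_with_logits_example():
    torch.manual_seed(0)
    logits, target = torch.randn(2, 3), torch.rand(2, 3)
    fused = MSEWithLogitsLoss()(logits, target)
    reference = nn.MSELoss()(torch.sigmoid(logits), target)
    assert torch.allclose(fused, reference)
    return fused.item()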
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn import MSELoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
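# Fused reduction: computes mean((sigmoid(logits) - target) ** 2) over all 256
# elements in a single pass and stores the scalar result.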
@triton.jit
def triton_per_fused_mse_loss_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_sigmoid_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSEWithLogitsLossNew(MSELoss):
"""
This loss combines a `Sigmoid` layer and the `MSELoss` in one single class.
"""
def __init__(self):
super(MSEWithLogitsLossNew, self).__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | MSEWithLogitsLoss | false | 5,961 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
from torch.nn import MSELoss
class Model(MSELoss):
"""
This loss combines a `Sigmoid` layer and the `MSELoss` in one single class.
"""
def __init__(self):
super().__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, input, target):
return super().forward(self.sigmoid(input), target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SoftDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vu/cvuhypre5iaxk4l6dwtcxj2vnv7mgf7bexthm22gttysfiy7dd32.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_3, mul_1, sum_2, mul_2, sum_3, add, union, dice, dice_loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# dice => div
# dice_loss => sub
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# sum_2 => sum_2
# sum_3 => sum_3
# union => add_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = tmp9 + tmp13
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tmp20 = 1.0
tmp21 = tmp20 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_3, mul_1, sum_2, mul_2, sum_3, add, union, dice, dice_loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
class SoftDiceLoss(_Loss):
"""
Soft_Dice = 2*|dot(A, B)| / (|dot(A, A)| + |dot(B, B)| + eps)
    eps is a small constant to avoid zero division.
"""
def __init__(self, *args, **kwargs):
super(SoftDiceLoss, self).__init__()
def forward(self, y_pred, y_true, eps=1e-08):
intersection = torch.sum(torch.mul(y_pred, y_true))
union = torch.sum(torch.mul(y_pred, y_pred)) + torch.sum(torch.mul(
y_true, y_true)) + eps
dice = 2 * intersection / union
dice_loss = 1 - dice
return dice_loss
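# --- Illustrative sanity check (added for this listing, not from the original
# repo; the helper name is an assumption): identical inputs give a loss of ~0,
# fully disjoint masks give the maximum loss of 1.
def _soft_dice_example():
    loss = SoftDiceLoss()
    same = torch.ones(2, 1, 4, 4)
    assert loss(same, same) < 1e-06
    assert loss(torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])) == 1.0
    return True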
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
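# Single-pass reduction: accumulates sum(pred * true), sum(pred * pred) and
# sum(true * true), then stores 1 - 2 * intersection / (union + 1e-08) as a
# scalar.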
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = tmp9 + tmp13
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tmp20 = 1.0
tmp21 = tmp20 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class SoftDiceLossNew(_Loss):
"""
Soft_Dice = 2*|dot(A, B)| / (|dot(A, A)| + |dot(B, B)| + eps)
    eps is a small constant to avoid zero division.
"""
def __init__(self, *args, **kwargs):
super(SoftDiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | SoftDiceLoss | false | 5,962 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch.nn.modules.loss import _Loss
class Model(_Loss):
"""
Soft_Dice = 2*|dot(A, B)| / (|dot(A, A)| + |dot(B, B)| + eps)
    eps is a small constant to avoid zero division.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, y_pred, y_true, eps=1e-08):
intersection = torch.sum(torch.mul(y_pred, y_true))
union = torch.sum(torch.mul(y_pred, y_pred)) + torch.sum(torch.mul(
y_true, y_true)) + eps
dice = 2 * intersection / union
dice_loss = 1 - dice
return dice_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Squash | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vn/cvnd2iskaaurc7lem5malwwtakejg65zghnsuuko655fwvqm7oxr.py
# Topologically Sorted Source Nodes: [mul_1, mul_2, add, x_caps_1], Original ATen: [aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# mul_1 => mul_1
# mul_2 => mul_2
# x_caps_1 => div
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %unsqueeze), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1.0001), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {})
triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 * tmp12
tmp14 = tmp12 * tmp12
tmp15 = 1.0001
tmp16 = tmp14 + tmp15
tmp17 = tmp13 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, mul_2, add, x_caps_1], Original ATen: [aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 16, 4, 4), (256, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Squash(nn.Module):
def __init__(self, num_C, num_D, eps=0.0001):
super(Squash, self).__init__()
self.num_C = num_C
self.num_D = num_D
self.eps = eps
def forward(self, x):
x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.
shape[3])
x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))
x_length = torch.unsqueeze(x_length, 2)
x_caps = x_caps * x_length / (1 + self.eps + x_length * x_length)
x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])
return x
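# --- Illustrative check (added for this listing, not from the original repo;
# the helper name is an assumption): the squash-style rescaling gives every
# capsule the length ||v||**2 / (1 + eps + ||v||**2), which is always below 1.
def _squash_length_example():
    torch.manual_seed(0)
    squash = Squash(num_C=4, num_D=4)
    out = squash(torch.rand(2, 16, 5, 5))
    lengths = out.view(2, 4, 4, 5, 5).pow(2).sum(dim=2).sqrt()
    assert bool((lengths < 1.0).all())
    return lengths.max().item()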
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_C': 4, 'num_D': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
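# Fused squash rescaling: per capsule, computes the L2 length over the 4 capsule
# components and scales each component by length / (length ** 2 + 1.0001).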
@triton.jit
def triton_poi_fused_add_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 * tmp12
tmp14 = tmp12 * tmp12
tmp15 = 1.0001
tmp16 = tmp14 + tmp15
tmp17 = tmp13 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_0[grid(1024)](arg0_1, buf0, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 16, 4, 4), (256, 16, 4, 1), 0),
class SquashNew(nn.Module):
def __init__(self, num_C, num_D, eps=0.0001):
super(SquashNew, self).__init__()
self.num_C = num_C
self.num_D = num_D
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | Squash | false | 5,963 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, num_C, num_D, eps=0.0001):
super().__init__()
self.num_C = num_C
self.num_D = num_D
self.eps = eps
def forward(self, x):
x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.
shape[3])
x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))
x_length = torch.unsqueeze(x_length, 2)
x_caps = x_caps * x_length / (1 + self.eps + x_length * x_length)
x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LinearCaps | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ue/cuebjdo54hcfres3xn3iznl3ebhyofowbofhn6u3l7q6dxtbbwzy.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %primals_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.0001
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 1, tl.int32)
tmp15 = tmp14 / tmp13
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), out=buf1)
del buf0
return (reinterpret_tensor(buf1, (4, 4, 4, 16), (256, 64, 16, 1), 0), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class LinearCaps(nn.Module):
def __init__(self, in_features, num_C, num_D, bias=False, eps=0.0001):
super(LinearCaps, self).__init__()
self.in_features = in_features
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.weight = Parameter(torch.Tensor(num_C * num_D, in_features))
if bias:
self.bias = Parameter(torch.Tensor(num_C * num_D))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
scalar = torch.sqrt(torch.sum(self.weight * self.weight, dim=1))
scalar = torch.reciprocal(scalar + self.eps)
scalar = torch.unsqueeze(scalar, dim=1)
output = F.linear(x, scalar * self.weight, self.bias)
return output
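# --- Illustrative sketch (added for this listing, not from the original repo;
# the helper name is an assumption): the layer is a plain linear map whose
# weight rows are rescaled by 1 / (||row|| + eps) before F.linear is applied.
def _linear_caps_example():
    torch.manual_seed(0)
    layer = LinearCaps(in_features=4, num_C=4, num_D=4)
    x = torch.rand(2, 4)
    row_norm = layer.weight.norm(dim=1, keepdim=True)
    reference = F.linear(x, layer.weight / (row_norm + layer.eps))
    out = layer(x)
    assert torch.allclose(out, reference, atol=1e-06)
    return out.shape  # (2, num_C * num_D) == (2, 16)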
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'num_C': 4, 'num_D': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.0001
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 1, tl.int32)
tmp15 = tmp14 / tmp13
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
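# Editor's note (comment added for clarity; not emitted by Inductor): for each output
# element x2 the kernel above reloads the four entries of weight row x1, accumulates their
# squares, takes the square root, adds eps = 1e-4, and multiplies the element by the
# reciprocal -- i.e. it materialises scalar * self.weight from LinearCaps.forward.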
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 16), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 16), (256, 64, 16, 1), 0
), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class LinearCapsNew(nn.Module):
def __init__(self, in_features, num_C, num_D, bias=False, eps=0.0001):
super(LinearCapsNew, self).__init__()
self.in_features = in_features
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.weight = Parameter(torch.Tensor(num_C * num_D, in_features))
if bias:
self.bias = Parameter(torch.Tensor(num_C * num_D))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | LinearCaps | false | 5,964 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, in_features, num_C, num_D, bias=False, eps=0.0001):
super().__init__()
self.in_features = in_features
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.weight = Parameter(torch.Tensor(num_C * num_D, in_features))
if bias:
self.bias = Parameter(torch.Tensor(num_C * num_D))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
scalar = torch.sqrt(torch.sum(self.weight * self.weight, dim=1))
scalar = torch.reciprocal(scalar + self.eps)
scalar = torch.unsqueeze(scalar, dim=1)
output = F.linear(x, scalar * self.weight, self.bias)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dk/cdkfylhbmav6z7nn6vy4nwynu3ry6ilbxpjspabpseiuux7csyql.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_3 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_group_norm_0 = async_compile.triton('triton_poi_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kd/ckdswb4lufwiij2wklkypvchblxzpif4ph4uxoqoklvcbijmlski.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_3 => add_1, mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_4), kwargs = {})
triton_poi_fused_native_group_norm_1 = async_compile.triton('triton_poi_fused_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 4)
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool3d_with_indices]
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [2, 2, 2], [2, 2, 2])
del primals_1
buf1 = buf0[0]
del buf0
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_group_norm_0.run(buf3, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_1.run(buf3, buf4, buf5, primals_3, primals_4, buf6, 64, grid=grid(64), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (0, 16, 4, 2, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf8 = buf5; del buf5 # reuse
buf9 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_0.run(buf7, buf8, buf9, 16, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_1.run(buf7, buf8, buf9, primals_6, primals_7, buf10, 64, grid=grid(64), stream=stream0)
del buf8
del buf9
del primals_7
return (buf10, primals_2, primals_3, primals_5, primals_6, reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), buf3, reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (Conv3d+ReLU+GroupNorm3d) by default.
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, encoder, kernel_size=3,
order='crg', num_groups=8):
super(DoubleConv, self).__init__()
if encoder:
conv1_in_channels = in_channels
conv1_out_channels = out_channels // 2
if conv1_out_channels < in_channels:
conv1_out_channels = in_channels
conv2_in_channels, conv2_out_channels = (conv1_out_channels,
out_channels)
else:
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels
self.add_module('SingleConv1', SingleConv(conv1_in_channels,
conv1_out_channels, kernel_size, order, num_groups))
self.add_module('SingleConv2', SingleConv(conv2_in_channels,
conv2_out_channels, kernel_size, order, num_groups))
class Encoder(nn.Module):
"""
A single module from the encoder path consisting of the optional max
pooling layer (one may specify the MaxPool kernel_size to be different
than the standard (2,2,2), e.g. if the volumetric data is anisotropic
    (make sure to use a complementary scale_factor in the decoder path)) followed by
a DoubleConv module.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
conv_kernel_size (int): size of the convolving kernel
apply_pooling (bool): if True use MaxPool3d before DoubleConv
pool_kernel_size (tuple): the size of the window to take a max over
pool_type (str): pooling layer: 'max' or 'avg'
basic_module(nn.Module): either ResNetBlock or DoubleConv
conv_layer_order (string): determines the order of layers
in `DoubleConv` module. See `DoubleConv` for more info.
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, conv_kernel_size=3,
apply_pooling=True, pool_kernel_size=(2, 2, 2), pool_type='max',
basic_module=DoubleConv, conv_layer_order='crg', num_groups=8):
super(Encoder, self).__init__()
assert pool_type in ['max', 'avg']
if apply_pooling:
if pool_type == 'max':
self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = None
self.basic_module = basic_module(in_channels, out_channels, encoder
=True, kernel_size=conv_kernel_size, order=conv_layer_order,
num_groups=num_groups)
def forward(self, x):
if self.pooling is not None:
x = self.pooling(x)
x = self.basic_module(x)
return x
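# Hedged editor sketch (not part of the original repo code): a standalone smoke test for
# this Encoder on an assumed 5D volume; the channel counts and spatial sizes below are
# illustrative choices, not values taken from the repository.
def _encoder_smoke_test():
    enc = Encoder(in_channels=4, out_channels=8, num_groups=4)
    volume = torch.rand(1, 4, 8, 8, 8)  # (N, C, D, H, W)
    out = enc(volume)  # MaxPool3d(2) halves D/H/W, DoubleConv maps 4 -> 8 channels
    return out.shape  # expected: torch.Size([1, 8, 4, 4, 4])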
def get_inputs():
return [torch.rand([4, 8, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
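# Editor's note (comment added for clarity; not emitted by Inductor): the kernel above
# fuses the ReLU with the GroupNorm statistics pass -- for each of the 16 (sample, group)
# slots it clamps the four values of the group at zero, then stores their mean and
# rsqrt(var + 1e-5) for the normalization kernel below.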
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
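# Editor's note (comment added for clarity; not emitted by Inductor): this companion
# kernel re-applies the ReLU to the raw conv output, normalises it with the per-group
# mean (in_ptr1) and rsqrt (in_ptr2) computed above, then applies the per-channel
# GroupNorm affine weight (in_ptr3) and bias (in_ptr4).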
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [2,
2, 2], [2, 2, 2])
del primals_1
buf1 = buf0[0]
del buf0
buf3 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4,
2, 2), (64, 16, 4, 2, 1), 0), primals_2, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_group_norm_0[grid(16)](buf3, buf4, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_native_group_norm_1[grid(64)](buf3, buf4, buf5,
primals_3, primals_4, buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4,
2, 2), (0, 16, 4, 2, 1), 0), primals_5, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf8 = buf5
del buf5
buf9 = buf4
del buf4
triton_poi_fused_native_group_norm_0[grid(16)](buf7, buf8, buf9, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_native_group_norm_1[grid(64)](buf7, buf8, buf9,
primals_6, primals_7, buf10, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf8
del buf9
del primals_7
return (buf10, primals_2, primals_3, primals_5, primals_6,
reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0),
buf3, reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1),
0), buf7)
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (Conv3d+ReLU+GroupNorm3d) by default.
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, encoder, kernel_size=3,
order='crg', num_groups=8):
super(DoubleConv, self).__init__()
if encoder:
conv1_in_channels = in_channels
conv1_out_channels = out_channels // 2
if conv1_out_channels < in_channels:
conv1_out_channels = in_channels
conv2_in_channels, conv2_out_channels = (conv1_out_channels,
out_channels)
else:
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels
self.add_module('SingleConv1', SingleConv(conv1_in_channels,
conv1_out_channels, kernel_size, order, num_groups))
self.add_module('SingleConv2', SingleConv(conv2_in_channels,
conv2_out_channels, kernel_size, order, num_groups))
class EncoderNew(nn.Module):
"""
A single module from the encoder path consisting of the optional max
pooling layer (one may specify the MaxPool kernel_size to be different
than the standard (2,2,2), e.g. if the volumetric data is anisotropic
    (make sure to use a complementary scale_factor in the decoder path)) followed by
a DoubleConv module.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
conv_kernel_size (int): size of the convolving kernel
apply_pooling (bool): if True use MaxPool3d before DoubleConv
pool_kernel_size (tuple): the size of the window to take a max over
pool_type (str): pooling layer: 'max' or 'avg'
basic_module(nn.Module): either ResNetBlock or DoubleConv
conv_layer_order (string): determines the order of layers
in `DoubleConv` module. See `DoubleConv` for more info.
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, conv_kernel_size=3,
apply_pooling=True, pool_kernel_size=(2, 2, 2), pool_type='max',
basic_module=DoubleConv, conv_layer_order='crg', num_groups=8):
super(EncoderNew, self).__init__()
assert pool_type in ['max', 'avg']
if apply_pooling:
if pool_type == 'max':
self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = None
self.basic_module = basic_module(in_channels, out_channels, encoder
=True, kernel_size=conv_kernel_size, order=conv_layer_order,
num_groups=num_groups)
def forward(self, input_0):
primals_2 = self.basic_module.SingleConv1.conv.weight
primals_3 = self.basic_module.SingleConv1.groupnorm.weight
primals_4 = self.basic_module.SingleConv1.groupnorm.bias
primals_5 = self.basic_module.SingleConv2.conv.weight
primals_6 = self.basic_module.SingleConv2.groupnorm.weight
primals_7 = self.basic_module.SingleConv2.groupnorm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | Encoder | false | 5,965 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super().__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (Conv3d+ReLU+GroupNorm3d) by default.
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
out
# ... truncated (>4000 chars) for memory efficiency |
OutputTransition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cncyyp5w7ugisohxqkosctv5wc5f7u7cfra24vd6r5vixoiwmbl6.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class OutputTransition(nn.Module):
"""
Decoder output layer
    outputs the prediction of the segmentation result
"""
def __init__(self, inChans, outChans):
super(OutputTransition, self).__init__()
self.conv1 = nn.Conv3d(in_channels=inChans, out_channels=outChans,
kernel_size=1)
self.actv1 = torch.sigmoid
def forward(self, x):
return self.actv1(self.conv1(x))
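# Hedged editor sketch (not part of the original repo code): OutputTransition is a 1x1x1
# Conv3d followed by an element-wise sigmoid, mapping inChans feature maps to outChans
# per-voxel probabilities. The shapes below are illustrative assumptions.
def _output_transition_example():
    head = OutputTransition(inChans=4, outChans=3)
    feats = torch.rand(1, 4, 8, 8, 8)  # (N, C, D, H, W)
    probs = head(feats)  # every value lies in (0, 1)
    return probs.shape  # expected: torch.Size([1, 3, 8, 8, 8])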
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inChans': 4, 'outChans': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
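# Editor's note (comment added for clarity; not emitted by Inductor): the kernel above
# fuses the Conv3d bias addition with the sigmoid -- in_out_ptr0 holds the bias-free
# convolution result, in_ptr0 the per-channel bias (selected by x1 = xindex // 64), and
# sigmoid(conv + bias) is written back in place.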
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4),
(256, 64, 16, 4, 1), 0), buf1
class OutputTransitionNew(nn.Module):
"""
Decoder output layer
    outputs the prediction of the segmentation result
"""
def __init__(self, inChans, outChans):
super(OutputTransitionNew, self).__init__()
self.conv1 = nn.Conv3d(in_channels=inChans, out_channels=outChans,
kernel_size=1)
self.actv1 = torch.sigmoid
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | OutputTransition | false | 5,966 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
class Model(nn.Module):
"""
Decoder output layer
    outputs the prediction of the segmentation result
"""
def __init__(self, inChans, outChans):
super().__init__()
self.conv1 = nn.Conv3d(in_channels=inChans, out_channels=outChans,
kernel_size=1)
self.actv1 = torch.sigmoid
def forward(self, x):
return self.actv1(self.conv1(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
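# Editor's note (comment added for clarity; not emitted by Inductor): the kernel above
# fuses the query projection's bias add with the attention scaling q * head_dim ** -0.5;
# with embed_dim = 4 and what appears to be 4 heads, head_dim is 1, so the scaling
# constant folds to the literal 1.0 seen in the kernel body.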
# kernel path: runs/run_shard_4/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
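# Editor's note (comment added for clarity; not emitted by Inductor): this persistent
# reduction implements a numerically stable softmax over the 16 key positions of each of
# the 64 attention-score rows -- subtract the row max, exponentiate, divide by the row sum.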
# kernel path: runs/run_shard_4/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
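# Editor's note (comment added for clarity; not emitted by Inductor): this clone kernel
# appears to materialise the transpose that brings the per-head attention output back to
# a contiguous (tgt_len, bsz, embed_dim) layout before the output-projection addmm.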
# kernel path: runs/run_shard_4/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_12, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
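# Editor's note (comment added for clarity; not emitted by Inductor): this kernel averages
# the attention maps over the 4 heads -- it sums the four head slices of each batch
# element's (tgt_len, src_len) map and multiplies by 0.25, producing the head-averaged
# weights returned alongside the attention output.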
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.utils.checkpoint
from torch.nn import Parameter
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
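        # With the embed_dim=4, num_heads=4 configuration used by get_init_inputs()
        # below, head_dim is 1 and scaling is 1.0, which is why the compiled mul
        # kernel multiplies by the constant 1.0.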
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Note that this simplified variant takes no
        `key_padding_mask` argument, so padding elements cannot be excluded
        from the key here.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
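        # data_ptr() equality is a cheap heuristic: identical storage for query, key
        # and value means self-attention (one fused qkv projection); identical key
        # and value storage means a shared kv projection.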
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q = q * self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
try:
attn_weights += attn_mask.unsqueeze(0)
            except Exception:
                # broadcasting the mask onto the attention weights failed;
                # surface the shape mismatch instead of silently continuing
                assert False, 'attn_mask shape is incompatible with attn_weights'
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.attn_dropout,
training=self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.utils.checkpoint
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
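# The reduction kernel above is the numerically stable softmax lowering
# (subtract the row max, exponentiate, normalize by the row sum), i.e. the
# F.softmax(attn_weights.float(), dim=-1) call in the eager module.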
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
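# The first two return values are what MultiheadAttentionNew.forward exposes:
# the attention output reshaped to (tgt_len, bsz, embed_dim) = (4, 4, 4) and the
# head-averaged attention weights of shape (4, 4, 16); the remaining tensors are
# intermediates kept for the backward pass.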
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| Wang-Chuanyu/MMSA | MultiheadAttention | false | 5,967 | [
"MIT"
] | 1 | 2a720530c369e68656102287edb651780e827135 | https://github.com/Wang-Chuanyu/MMSA/tree/2a720530c369e68656102287edb651780e827135 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.utils.checkpoint
from torch.nn import Parameter
class Model(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Note that this simplified variant takes no
        `key_padding_mask` argument, so padding elements cannot be excluded
        from the key here.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q = q * self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v,
# ... truncated (>4000 chars) for memory efficiency |
Relu_Adpt | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6i/c6irug2zijhsre6i7x7vj3d7ab4lvwwhriahoek2hwnicmn6fkmi.py
# Topologically Sorted Source Nodes: [sub, relu], Original ATen: [aten.sub, aten.relu]
# Source node to ATen node mapping:
# relu => relu
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
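# In the kernel below, tmp11 is the per-capsule L2 length sqrt(sum(x_caps * x_caps, dim=2))
# and tmp15 is relu(length - theta), matching the first half of Relu_Adpt.forward.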
triton_poi_fused_relu_sub_0 = async_compile.triton('triton_poi_fused_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x1 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x3)), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + (64*x3)), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + (64*x3)), xmask)
tmp12 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp13 = tmp11 - tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/64/c6454pzbvwqda5hndlwtrahibn3volp4lzkne3vqs2dhgibcj3m3.py
# Topologically Sorted Source Nodes: [sub, relu, mul_1, add, x_caps_1], Original ATen: [aten.sub, aten.relu, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# mul_1 => mul_1
# relu => relu
# sub => sub
# x_caps_1 => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %primals_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, 0.0001), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {})
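# In eager terms this second kernel fuses the capsule rescaling
#   relu(length - theta) * x_caps / (length + eps)
# with eps = 0.0001, recomputing the capsule length (the L2 norm over the capsule
# dimension) in-register rather than reloading it.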
triton_poi_fused_add_div_mul_relu_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_relu_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_relu_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_relu_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 0.0001
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 256, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, relu], Original ATen: [aten.sub, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_sub_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, relu, mul_1, add, x_caps_1], Original ATen: [aten.sub, aten.relu, aten.mul, aten.add, aten.div]
triton_poi_fused_add_div_mul_relu_sub_1.run(buf0, primals_1, buf1, 1024, grid=grid(1024), stream=stream0)
del buf0
return (reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 16, 4, 1), 0), primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class Relu_Adpt(nn.Module):
def __init__(self, num_C, num_D, eps=0.0001):
super(Relu_Adpt, self).__init__()
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.theta = Parameter(torch.Tensor(1, self.num_C, 1, 1, 1))
self.theta.data.fill_(0.0)
def forward(self, x):
x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.
shape[3])
x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))
x_length = torch.unsqueeze(x_length, 2)
x_caps = F.relu(x_length - self.theta) * x_caps / (x_length + self.eps)
x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])
return x
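# Shape sketch for the default inputs (illustration only): a (4, 4, 4, 4, 4) input is
# read as (batch, num_C, num_D, H, W); capsule lengths are L2 norms over num_D, each
# capsule is scaled by relu(length - theta) / (length + eps), and the result is
# flattened back to (4, 16, 4, 4).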
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_C': 4, 'num_D': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x3), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x3), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x3), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x3), xmask)
tmp12 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp13 = tmp11 - tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_relu_sub_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 0.0001
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 256, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sub_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_div_mul_relu_sub_1[grid(1024)](buf0, primals_1,
buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
return reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 16, 4, 1), 0
), primals_1, primals_2
class Relu_AdptNew(nn.Module):
def __init__(self, num_C, num_D, eps=0.0001):
super(Relu_AdptNew, self).__init__()
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.theta = Parameter(torch.Tensor(1, self.num_C, 1, 1, 1))
self.theta.data.fill_(0.0)
def forward(self, input_0):
primals_2 = self.theta
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | Relu_Adpt | false | 5,968 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_C, num_D, eps=0.0001):
super().__init__()
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.theta = Parameter(torch.Tensor(1, self.num_C, 1, 1, 1))
self.theta.data.fill_(0.0)
def forward(self, x):
x_caps = x.view(x.shape[0], self.num_C, self.num_D, x.shape[2], x.
shape[3])
x_length = torch.sqrt(torch.sum(x_caps * x_caps, dim=2))
x_length = torch.unsqueeze(x_length, 2)
x_caps = F.relu(x_length - self.theta) * x_caps / (x_length + self.eps)
x = x_caps.view(x.shape[0], -1, x.shape[2], x.shape[3])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SpatialAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/46/c46mg7rvdztu6n5oosf5c4if7ziag6obrxhwbn43lcdfibfuom7w.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {})
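# The two branches in this kernel materialize the channel-mean (x1 == 0) and
# channel-max (x1 == 1) planes, i.e. the eager
#   torch.cat([x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True)[0]], dim=1)
# from SpatialAttention.forward, fused into a single pass over the input.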
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2s/c2s2pec3vduo4xn2kfu53hypzbhir2ql56lmvazfmymhwv2ehhv5.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf2, 64, grid=grid(64), stream=stream0)
return (buf2, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
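# conv1 has a single output channel, so forward returns an (N, 1, H, W) spatial
# attention map squashed into (0, 1) by the sigmoid, typically multiplied
# elementwise with the input features by the caller.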
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_sigmoid_1[grid(64)](buf2, 64, XBLOCK=64, num_warps
=1, num_stages=1)
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttentionNew, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| WhuEven/multi_hyp_cc | SpatialAttention | false | 5,969 | [
"MIT"
] | 1 | 53a6bc438b865d606f5e6a53a442efbd8a04fe5b | https://github.com/WhuEven/multi_hyp_cc/tree/53a6bc438b865d606f5e6a53a442efbd8a04fe5b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
UpBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nb/cnbca5sshbm5zgfgicbddaejzk5thqksd5raak3rakwfnspwfrig.py
# Topologically Sorted Source Nodes: [upsample1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# upsample1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add, add_10, add_11, add_12, add_6, add_7, add_8, add_9, clamp_max_3, clamp_max_4, clamp_max_5, clamp_min, clamp_min_3, clamp_min_4, clamp_min_5, convert_element_type, convert_element_type_1, convert_element_type_3, convert_element_type_5, iota, mul, mul_3, mul_4, mul_5, mul_6, mul_7, mul_8, mul_9, sub, sub_10, sub_11, sub_12, sub_3, sub_4, sub_5, sub_6, sub_7, sub_8, sub_9
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=4] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=6] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %convert_element_type_3 : [num_users=6] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_1, torch.int64), kwargs = {})
# %convert_element_type_5 : [num_users=6] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3, %clamp_max_2]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1, %clamp_max_2]), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3, %clamp_max_2]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1, %clamp_max_2]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_5), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=4] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_3), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %clamp_max_3), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_4), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_3), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_6), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %convert_element_type_3), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_8, 0.0), kwargs = {})
# %clamp_max_4 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_4, 1.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_7, %add_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_4), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %add_8), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_4), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_8), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_11, 0.0), kwargs = {})
# %clamp_max_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_5, 1.0), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %add_10), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_5), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_9), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr1, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 64) % 8
x1 = (xindex // 8) % 8
x0 = xindex % 8
x3 = (xindex // 512)
x5 = xindex
tmp0 = x2
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = x1
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp10 + tmp2
tmp12 = tmp11 * tmp2
tmp13 = tmp12 - tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp6)
tmp15 = tmp14.to(tl.int32)
tmp16 = x0
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp17 + tmp2
tmp19 = tmp18 * tmp2
tmp20 = tmp19 - tmp2
tmp21 = triton_helpers.maximum(tmp20, tmp6)
tmp22 = tmp21.to(tl.int32)
tmp23 = tl.full([1], 1, tl.int64)
tmp24 = tmp22 + tmp23
tmp25 = tl.full([1], 3, tl.int64)
tmp26 = triton_helpers.minimum(tmp24, tmp25)
tmp27 = tl.load(in_ptr0 + (tmp26 + (4*tmp15) + (16*tmp8) + (64*x3)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp22 + (4*tmp15) + (16*tmp8) + (64*x3)), None, eviction_policy='evict_last')
tmp29 = tmp27 - tmp28
tmp30 = tmp22.to(tl.float32)
tmp31 = tmp21 - tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp6)
tmp33 = 1.0
tmp34 = triton_helpers.minimum(tmp32, tmp33)
tmp35 = tmp29 * tmp34
tmp36 = tmp8 + tmp23
tmp37 = triton_helpers.minimum(tmp36, tmp25)
tmp38 = tmp15 + tmp23
tmp39 = triton_helpers.minimum(tmp38, tmp25)
tmp40 = tl.load(in_ptr0 + (tmp26 + (4*tmp39) + (16*tmp37) + (64*x3)), None, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (tmp26 + (4*tmp39) + (16*tmp8) + (64*x3)), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (tmp22 + (4*tmp39) + (16*tmp8) + (64*x3)), None, eviction_policy='evict_last')
tmp43 = tmp41 - tmp42
tmp44 = tmp43 * tmp34
tmp45 = tmp42 + tmp44
tmp46 = tl.load(in_ptr0 + (tmp26 + (4*tmp15) + (16*tmp37) + (64*x3)), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (tmp22 + (4*tmp15) + (16*tmp37) + (64*x3)), None, eviction_policy='evict_last')
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp34
tmp50 = tmp47 + tmp49
tmp51 = tl.load(in_ptr0 + (tmp22 + (4*tmp39) + (16*tmp37) + (64*x3)), None, eviction_policy='evict_last')
tmp52 = tmp40 - tmp51
tmp53 = tmp52 * tmp34
tmp54 = tmp51 + tmp53
tmp55 = tmp28 + tmp35
tmp56 = tmp45 - tmp55
tmp57 = tmp15.to(tl.float32)
tmp58 = tmp14 - tmp57
tmp59 = triton_helpers.maximum(tmp58, tmp6)
tmp60 = triton_helpers.minimum(tmp59, tmp33)
tmp61 = tmp56 * tmp60
tmp62 = tmp54 - tmp50
tmp63 = tmp62 * tmp60
tmp64 = tmp50 + tmp63
tmp65 = tmp55 + tmp61
tmp66 = tmp64 - tmp65
tmp67 = tmp8.to(tl.float32)
tmp68 = tmp7 - tmp67
tmp69 = triton_helpers.maximum(tmp68, tmp6)
tmp70 = triton_helpers.minimum(tmp69, tmp33)
tmp71 = tmp66 * tmp70
tmp72 = tmp65 + tmp71
tl.store(in_out_ptr1 + (x5), tmp72, None)
''', device_str='cuda')
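# --- Editorial note: reference sketch, not part of the generated module. ---
# The fused kernel above reproduces trilinear, align_corners=False resampling: each output
# coordinate i maps to the source coordinate (i + 0.5) * 0.5 - 0.5 clamped at 0; the integer
# part picks the lower corner (tmp8/tmp15/tmp22), the upper corner is clamped to the last
# valid index 3 (tmp37/tmp39/tmp26), and the fractional parts (tmp70/tmp60/tmp34) drive three
# nested linear interpolations over the eight gathered voxels.
def _trilinear_reference_sketch():
    # Hypothetical helper, shown only to document the eager-mode equivalent of the kernel.
    import torch
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4, 4)
    return F.interpolate(x, size=(8, 8, 8), mode='trilinear', align_corners=False)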
# kernel path: runs/run_shard_4/inductor_cache/pr/cpr5ipr4tio67qyjwchplqvfau3ehryxnhuzkplrchppzclithjy.py
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# upsample_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_12, %primals_2, %primals_3, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 512) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
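# Editorial note: the extern convolution launched in `call` below runs with bias=None, so
# this pointwise kernel adds the per-channel bias (in_ptr0) in place afterwards;
# x1 = (xindex // 512) % 4 recovers the channel index because each channel of the
# (4, 4, 8, 8, 8) output occupies 8 * 8 * 8 = 512 contiguous elements.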
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 4, 8, 8, 8), (2048, 512, 64, 8, 1), torch.float32)
buf5 = buf4; del buf4 # reuse
buf9 = buf5; del buf5 # reuse
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [upsample1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf10, primals_1, 8192, grid=grid(8192), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 8, 8, 8), (2048, 512, 64, 8, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf12, primals_3, 8192, grid=grid(8192), stream=stream0)
del primals_3
return (buf12, primals_2, buf10, )
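# Editorial note: `call` returns the biased convolution output first; primals_2 (the 1x1x1
# weight) and buf10 (the upsampled activation fed to the convolution) are returned as well
# because the autograd backward graph needs them to differentiate through the convolution.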
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
class UpBlock(nn.Module):
"""
    A module that upsamples the feature map: the spatial dimensions are doubled with
    trilinear interpolation and the channels are then remapped by a 1x1x1 convolution.
    Args:
        input_channels (int): number of input channels
        output_channels (int): number of output channels
"""
def __init__(self, input_channels, output_channels):
super(UpBlock, self).__init__()
self.input_channels = input_channels
self.output_channels = output_channels
self.conv1 = conv3d(input_channels, output_channels, kernel_size=1,
bias=True, padding=0)
def forward(self, x):
_, _c, w, h, d = x.size()
upsample1 = F.upsample(x, [2 * w, 2 * h, 2 * d], mode='trilinear')
upsample = self.conv1(upsample1)
return upsample
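# Usage sketch (illustrative, not from the source repository):
#   block = UpBlock(input_channels=4, output_channels=4)
#   y = block(torch.rand(4, 4, 4, 4, 4))
#   assert y.shape == (4, 4, 8, 8, 8)  # spatial dims doubled, channels mapped by the 1x1x1 conv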
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 64 % 8
x1 = xindex // 8 % 8
x0 = xindex % 8
x3 = xindex // 512
x5 = xindex
tmp0 = x2
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = x1
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp10 + tmp2
tmp12 = tmp11 * tmp2
tmp13 = tmp12 - tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp6)
tmp15 = tmp14.to(tl.int32)
tmp16 = x0
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp17 + tmp2
tmp19 = tmp18 * tmp2
tmp20 = tmp19 - tmp2
tmp21 = triton_helpers.maximum(tmp20, tmp6)
tmp22 = tmp21.to(tl.int32)
tmp23 = tl.full([1], 1, tl.int64)
tmp24 = tmp22 + tmp23
tmp25 = tl.full([1], 3, tl.int64)
tmp26 = triton_helpers.minimum(tmp24, tmp25)
tmp27 = tl.load(in_ptr0 + (tmp26 + 4 * tmp15 + 16 * tmp8 + 64 * x3),
None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp22 + 4 * tmp15 + 16 * tmp8 + 64 * x3),
None, eviction_policy='evict_last')
tmp29 = tmp27 - tmp28
tmp30 = tmp22.to(tl.float32)
tmp31 = tmp21 - tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp6)
tmp33 = 1.0
tmp34 = triton_helpers.minimum(tmp32, tmp33)
tmp35 = tmp29 * tmp34
tmp36 = tmp8 + tmp23
tmp37 = triton_helpers.minimum(tmp36, tmp25)
tmp38 = tmp15 + tmp23
tmp39 = triton_helpers.minimum(tmp38, tmp25)
tmp40 = tl.load(in_ptr0 + (tmp26 + 4 * tmp39 + 16 * tmp37 + 64 * x3),
None, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (tmp26 + 4 * tmp39 + 16 * tmp8 + 64 * x3),
None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (tmp22 + 4 * tmp39 + 16 * tmp8 + 64 * x3),
None, eviction_policy='evict_last')
tmp43 = tmp41 - tmp42
tmp44 = tmp43 * tmp34
tmp45 = tmp42 + tmp44
tmp46 = tl.load(in_ptr0 + (tmp26 + 4 * tmp15 + 16 * tmp37 + 64 * x3),
None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (tmp22 + 4 * tmp15 + 16 * tmp37 + 64 * x3),
None, eviction_policy='evict_last')
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp34
tmp50 = tmp47 + tmp49
tmp51 = tl.load(in_ptr0 + (tmp22 + 4 * tmp39 + 16 * tmp37 + 64 * x3),
None, eviction_policy='evict_last')
tmp52 = tmp40 - tmp51
tmp53 = tmp52 * tmp34
tmp54 = tmp51 + tmp53
tmp55 = tmp28 + tmp35
tmp56 = tmp45 - tmp55
tmp57 = tmp15.to(tl.float32)
tmp58 = tmp14 - tmp57
tmp59 = triton_helpers.maximum(tmp58, tmp6)
tmp60 = triton_helpers.minimum(tmp59, tmp33)
tmp61 = tmp56 * tmp60
tmp62 = tmp54 - tmp50
tmp63 = tmp62 * tmp60
tmp64 = tmp50 + tmp63
tmp65 = tmp55 + tmp61
tmp66 = tmp64 - tmp65
tmp67 = tmp8.to(tl.float32)
tmp68 = tmp7 - tmp67
tmp69 = triton_helpers.maximum(tmp68, tmp6)
tmp70 = triton_helpers.minimum(tmp69, tmp33)
tmp71 = tmp66 * tmp70
tmp72 = tmp65 + tmp71
tl.store(in_out_ptr1 + x5, tmp72, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 512 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 4, 8, 8, 8), (2048, 512, 64, 8, 1),
torch.float32)
buf5 = buf4
del buf4
buf9 = buf5
del buf5
buf10 = buf9
del buf9
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(8192)](buf10, primals_1, 8192, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
buf11 = extern_kernels.convolution(buf10, primals_2, stride=(1, 1,
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 8, 8, 8), (2048, 512, 64, 8, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_1[grid(8192)](buf12, primals_3, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf12, primals_2, buf10
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
class UpBlockNew(nn.Module):
"""
    A module that upsamples the feature map: the spatial dimensions are doubled with
    trilinear interpolation and the channels are then remapped by a 1x1x1 convolution.
    Args:
        input_channels (int): number of input channels
        output_channels (int): number of output channels
"""
def __init__(self, input_channels, output_channels):
super(UpBlockNew, self).__init__()
self.input_channels = input_channels
self.output_channels = output_channels
self.conv1 = conv3d(input_channels, output_channels, kernel_size=1,
bias=True, padding=0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
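# Editorial note: UpBlockNew holds the same parameters as UpBlock but routes the forward
# pass through the compiled `call` above, handing the conv weight/bias over as primals;
# output[0] is the result of the fused upsample + 1x1x1 convolution graph, while the
# remaining outputs are tensors saved for the backward pass.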
| WdBlink/AugMix-3DOCUNet-Brats2019 | UpBlock | false | 5,970 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
import torch.nn.functional as F
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
class Model(nn.Module):
"""
    A module that upsamples the feature map: the spatial dimensions are doubled with
    trilinear interpolation and the channels are then remapped by a 1x1x1 convolution.
    Args:
        input_channels (int): number of input channels
        output_channels (int): number of output channels
"""
def __init__(self, input_channels, output_channels):
super().__init__()
self.input_channels = input_channels
self.output_channels = output_channels
self.conv1 = conv3d(input_channels, output_channels, kernel_size=1,
bias=True, padding=0)
def forward(self, x):
_, _c, w, h, d = x.size()
upsample1 = F.upsample(x, [2 * w, 2 * h, 2 * d], mode='trilinear')
upsample = self.conv1(upsample1)
return upsample
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GreenBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ke/ckev4oib3cv2oksvgsnixm2ir66mykxwkb2phwtnkc7jitdkle52.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => add, add_1, mul_1, rsqrt, var_mean
# x_1 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_6), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused_native_group_norm_relu_threshold_backward_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
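# --- Editorial note: eager-mode sketch, not part of the generated module. ---
# With num_groups equal to the number of channels, each of the 16 (sample, channel) slabs of
# 16 elements is normalised independently. Besides the ReLU output, the kernel also stores
# the `relu(x) <= 0` mask for threshold_backward and the per-slab mean / rsqrt for backward.
def _groupnorm_relu_reference_sketch(x, weight, bias, eps=1e-05):
    # Hypothetical helper: x is (4, 4, 4, 4); statistics are taken over the trailing two dims.
    import torch
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    y = (x - mean) * torch.rsqrt(var + eps)
    y = y * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    return torch.relu(y)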
# kernel path: runs/run_shard_4/inductor_cache/ex/cexccvjnx76cxpye4rdgovum364hqpnhyw7qtgoxiqjkkplj5quo.py
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.convolution, aten.native_group_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => add_2, add_3, mul_3, rsqrt_1, var_mean_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_7, %primals_6, %primals_7, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_13), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_10), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1 = async_compile.triton('triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp31, xmask)
tl.store(out_ptr3 + (r2 + (16*x3)), tmp33, xmask)
tl.store(out_ptr4 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
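# Editorial note: same normalisation pattern as kernel 0, fused with the bias add of the
# preceding 3x3x3 convolution. tmp2 = conv_output + bias is written back in place
# (in_out_ptr0) so the extern convolution can run with bias=None; the affine parameters
# (in_ptr1 / in_ptr2), the ReLU, the <= 0 mask and the saved mean / rsqrt then follow the
# same layout as above.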
# kernel path: runs/run_shard_4/inductor_cache/hi/chi5de6wkhiuhrbwler5y57mp3zifkrpug7qo5wbq74vf32ceipa.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %squeeze_6), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
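# Editorial note: final fusion for the residual connection. tmp0 is the 1x1x1 shortcut
# convolution output (computed with bias=None), tmp3 is the last 3x3x3 convolution output
# (also bias-free); both per-channel biases are added here and the two branches are summed
# in place, i.e. out = (conv3d_1(x) + b1) + (conv3d_3(...) + b3).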
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [inp_res], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf16 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_threshold_backward_0.run(primals_3, primals_4, primals_5, buf1, buf4, buf15, buf16, 16, 16, grid=grid(16), stream=stream0)
del primals_4
del primals_5
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_6, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.convolution, aten.native_group_norm, aten.relu, aten.threshold_backward]
triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1.run(buf6, primals_7, primals_8, primals_9, buf7, buf11, buf14, buf10, 16, 16, grid=grid(16), stream=stream0)
del primals_7
del primals_9
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf13 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf13, primals_2, buf12, primals_11, 256, grid=grid(256), stream=stream0)
del buf12
del primals_11
del primals_2
return (buf13, primals_1, primals_3, primals_6, primals_8, primals_10, reinterpret_tensor(buf4, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(buf11, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf14, buf15, reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
class GreenBlock(nn.Module):
"""
green_block(inp, filters, name=None)
------------------------------------
Implementation of the special residual block used in the paper. The block
    consists of two (GroupNorm --> ReLU --> 3x3x3 non-strided Convolution)
units, with a residual connection from the input `inp` to the output. Used
internally in the model. Can be used independently as well.
Parameters
----------
    `inp`: A keras.layers.Layer instance, required
The keras layer just preceding the green block.
`filters`: integer, required
No. of filters to use in the 3D convolutional block. The output
layer of this green block will have this many no. of channels.
`data_format`: string, optional
        The format of the input data. Must be either 'channels_first' or
'channels_last'. Defaults to `channels_first`, as used in the paper.
`name`: string, optional
The name to be given to this green block. Defaults to None, in which
case, keras uses generated names for the involved layers. If a string
is provided, the names of individual layers are generated by attaching
a relevant prefix from [GroupNorm_, Res_, Conv3D_, Relu_, ], followed
by _1 or _2.
Returns
-------
`out`: A keras.layers.Layer instance
The output of the green block. Has no. of channels equal to `filters`.
        The size of the rest of the dimensions remains the same as in `inp`.
"""
def __init__(self, input_channels, output_channels):
super(GreenBlock, self).__init__()
self.conv3d_1 = conv3d(input_channels, output_channels, padding=0,
kernel_size=1, bias=True)
self.gn1 = nn.GroupNorm(input_channels, input_channels)
self.act_1 = nn.ReLU()
self.conv3d_2 = conv3d(input_channels, output_channels, kernel_size
=3, bias=True)
self.gn2 = nn.GroupNorm(output_channels, output_channels)
self.act_2 = nn.ReLU()
self.conv3d_3 = conv3d(output_channels, output_channels,
kernel_size=3, bias=True)
def forward(self, x):
inp_res = self.conv3d_1(x)
x = self.gn1(x)
x = self.act_1(x)
x = self.conv3d_2(x)
x = self.gn2(x)
x = self.act_2(x)
x = self.conv3d_3(x)
out = inp_res + x
return out
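# Usage sketch (illustrative, not from the source repository): the shortcut path is the
# 1x1x1 convolution applied to the raw input, the main path is GN -> ReLU -> 3x3x3 conv ->
# GN -> ReLU -> 3x3x3 conv, and the two are summed element-wise, so both branches keep the
# spatial size (padding=1 on the 3x3x3 convolutions, padding=0 on the 1x1x1) and produce
# output_channels feature maps.
#   block = GreenBlock(input_channels=4, output_channels=4)
#   out = block(torch.rand(4, 4, 4, 4))   # same shape as the input here: (4, 4, 4, 4)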
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3,
out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp31, xmask)
tl.store(out_ptr3 + (r2 + 16 * x3), tmp33, xmask)
tl.store(out_ptr4 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf16 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(16)](
primals_3, primals_4, primals_5, buf1, buf4, buf15, buf16, 16,
16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_4
del primals_5
buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 4,
4, 4), (256, 64, 16, 4, 1), 0), primals_6, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_convolution_native_group_norm_relu_threshold_backward_1[
grid(16)](buf6, primals_7, primals_8, primals_9, buf7, buf11,
buf14, buf10, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
del primals_9
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), primals_10, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf13 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_2[grid(256)](buf13, primals_2, buf12,
primals_11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del primals_11
del primals_2
return (buf13, primals_1, primals_3, primals_6, primals_8, primals_10,
reinterpret_tensor(buf4, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0),
buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0),
reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(
buf11, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf14, buf15,
reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0),
reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 1), 0))
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
class GreenBlockNew(nn.Module):
"""
green_block(inp, filters, name=None)
------------------------------------
Implementation of the special residual block used in the paper. The block
    consists of two (GroupNorm --> ReLU --> 3x3x3 non-strided Convolution)
units, with a residual connection from the input `inp` to the output. Used
internally in the model. Can be used independently as well.
Parameters
----------
    `inp`: A keras.layers.Layer instance, required
The keras layer just preceding the green block.
`filters`: integer, required
No. of filters to use in the 3D convolutional block. The output
layer of this green block will have this many no. of channels.
`data_format`: string, optional
        The format of the input data. Must be either 'channels_first' or
'channels_last'. Defaults to `channels_first`, as used in the paper.
`name`: string, optional
The name to be given to this green block. Defaults to None, in which
case, keras uses generated names for the involved layers. If a string
is provided, the names of individual layers are generated by attaching
a relevant prefix from [GroupNorm_, Res_, Conv3D_, Relu_, ], followed
by _1 or _2.
Returns
-------
`out`: A keras.layers.Layer instance
The output of the green block. Has no. of channels equal to `filters`.
        The size of the rest of the dimensions remains the same as in `inp`.
"""
def __init__(self, input_channels, output_channels):
super(GreenBlockNew, self).__init__()
self.conv3d_1 = conv3d(input_channels, output_channels, padding=0,
kernel_size=1, bias=True)
self.gn1 = nn.GroupNorm(input_channels, input_channels)
self.act_1 = nn.ReLU()
self.conv3d_2 = conv3d(input_channels, output_channels, kernel_size
=3, bias=True)
self.gn2 = nn.GroupNorm(output_channels, output_channels)
self.act_2 = nn.ReLU()
self.conv3d_3 = conv3d(output_channels, output_channels,
kernel_size=3, bias=True)
def forward(self, input_0):
primals_1 = self.conv3d_1.weight
primals_2 = self.conv3d_1.bias
primals_4 = self.gn1.weight
primals_5 = self.gn1.bias
primals_6 = self.conv3d_2.weight
primals_7 = self.conv3d_2.bias
primals_8 = self.gn2.weight
primals_9 = self.gn2.bias
primals_10 = self.conv3d_3.weight
primals_11 = self.conv3d_3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | GreenBlock | false | 5,971 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
class Model(nn.Module):
"""
green_block(inp, filters, name=None)
------------------------------------
Implementation of the special residual block used in the paper. The block
    consists of two (GroupNorm --> ReLU --> 3x3x3 non-strided Convolution)
units, with a residual connection from the input `inp` to the output. Used
internally in the model. Can be used independently as well.
Parameters
----------
    `inp`: A keras.layers.Layer instance, required
The keras layer just preceding the green block.
`filters`: integer, required
No. of filters to use in the 3D convolutional block. The output
layer of this green block will have this many no. of channels.
`data_format`: string, optional
        The format of the input data. Must be either 'channels_first' or
'channels_last'. Defaults to `channels_first`, as used in the paper.
`name`: string, optional
The name to be given to this green block. Defaults to None, in which
case, keras uses generated names for the involved layers. If a string
is provided, the names of individual layers are generated by attaching
a relevant prefix from [GroupNorm_, Res_, Conv3D_, Relu_, ], followed
by _1 or _2.
Returns
-------
`out`: A keras.layers.Layer instance
The output of the green block. Has no. of channels equal to `filters`.
        The size of the rest of the dimensions remains the same as in `inp`.
"""
def __init__(self, input_channels, output_channels):
super().__init__()
self.conv3d_1 = conv3d(input_channels, output_channels, padding=0,
kernel_size=1, bias=True)
self.gn1 = nn.GroupNorm(input_channels, input_channels)
self.act_1 = nn.ReLU()
self.conv3d_2 = conv3d(input_channels, output_channels, kernel_size
=3, bias=True)
self.gn2 = nn.GroupNorm(output_channels, output_channels)
self.act_2 = nn.ReLU()
self.conv3d_3 = conv3d(output_channels, output_channels,
kernel_size=3, bias=True)
def forward(self, x):
inp_res = self.conv3d_1(x)
x = self.gn1(x)
x = self.act_1(x)
x = self.conv3d_2(x)
x = self.gn2(x)
x = self.act_2(x)
x = self.conv3d_3(x)
out = inp_res + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ExtResNetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cn/ccnjw5i2bmojhqd43gtvy2mo5rvcvx4kyqocflzm32zw36biefd7.py
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu]
# Source node to ATen node mapping:
# input_2 => add, add_1, mul_1, rsqrt, var_mean
# input_3 => expm1, gt, mul_2, mul_4, where
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_6), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_2, %mul_4), kwargs = {})
triton_per_fused_elu_native_group_norm_0 = async_compile.triton('triton_per_fused_elu_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_elu_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 1.0
tmp31 = tmp27 * tmp30
tmp32 = libdevice.expm1(tmp31)
tmp33 = tmp32 * tmp30
tmp34 = tl.where(tmp29, tmp31, tmp33)
tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp34, xmask)
tl.store(out_ptr2 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ql/cql5y77ikb3qyybctpvmmqkg56j3yahci6d3fpavhe5gjjt6labu.py
# Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu]
# Source node to ATen node mapping:
# input_8 => add_4, add_5, mul_11, rsqrt_2, var_mean_2
# out => add_6
# out_1 => expm1_2, gt_2, mul_12, mul_14, where_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_22), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %unsqueeze_19), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %where), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {})
# %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 1.0), kwargs = {})
# %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_12, %mul_14), kwargs = {})
triton_per_fused_add_elu_native_group_norm_1 = async_compile.triton('triton_per_fused_add_elu_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_elu_native_group_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp32 = 1.0
tmp33 = tmp29 * tmp32
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp32
tmp36 = tl.where(tmp31, tmp33, tmp35)
tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp36, xmask)
tl.store(out_ptr2 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf4; del buf4 # reuse
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu]
stream0 = get_raw_stream(0)
triton_per_fused_elu_native_group_norm_0.run(buf6, buf0, primals_3, primals_4, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf11; del buf11 # reuse
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.native_group_norm, aten.elu]
triton_per_fused_elu_native_group_norm_0.run(buf13, buf7, primals_6, primals_7, buf8, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = buf19; del buf19 # reuse
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu]
triton_per_fused_add_elu_native_group_norm_1.run(buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 16, 16, grid=grid(16), stream=stream0)
del primals_10
return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor(buf18, (4, 4), (4, 1), 0), buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class ExtResNetBlock(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'cge', num_groups=8, **kwargs):
super(ExtResNetBlock, self).__init__()
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=n_order, num_groups=num_groups)
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
residual = out
out = self.conv2(out)
out = self.conv3(out)
out += residual
out = self.non_linearity(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
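# Hypothetical usage (not part of the original file):
#   block = ExtResNetBlock(**get_init_inputs()[1])
#   out = block(get_inputs()[0])  # -> tensor of shape [4, 4, 4, 4]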
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
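# The kernel below fuses the GroupNorm statistics with the ELU activation: each of the
# 16 programs reduces one normalization group of 16 elements, computes its mean and
# variance, normalizes with rsqrt(var + 1e-05), applies the affine weight/bias
# (in_ptr1 / in_ptr2), and finishes with ELU (x if x > 0 else expm1(x)). The result is
# written back in place through in_out_ptr0, while the group mean (out_ptr0) and the
# rsqrt of the variance (out_ptr2) are kept for the backward pass.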
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 1.0
tmp31 = tmp27 * tmp30
tmp32 = libdevice.expm1(tmp31)
tmp33 = tmp32 * tmp30
tmp34 = tl.where(tmp29, tmp31, tmp33)
tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp34, xmask)
tl.store(out_ptr2 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
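# Same fusion as above, plus a residual add: in_ptr3 holds the output of the first
# conv/GroupNorm/ELU stage, which is added to the normalized activation before the
# ELU. This mirrors `out += residual; out = self.non_linearity(out)` in
# ExtResNetBlock.forward.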
@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp32 = 1.0
tmp33 = tmp29 * tmp32
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp32
tmp36 = tl.where(tmp31, tmp33, tmp35)
tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp36, xmask)
tl.store(out_ptr2 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
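# call() runs the three 3x3x3 convolutions (stride 1, padding 1) through
# extern_kernels.convolution, viewing the (4, 4, 4, 4) input as a 5-D tensor with a
# leading batch dimension of 1, and applies the two fused kernels above after each
# convolution (the last one also folds in the residual held in buf6). The returned
# tuple starts with the block output (buf20); the remaining tensors are saved for
# the backward pass.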
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf4
del buf4
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_elu_native_group_norm_0[grid(16)](buf6, buf0,
primals_3, primals_4, buf1, buf5, 16, 16, XBLOCK=8, num_warps=2,
num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4,
4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf11
del buf11
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_elu_native_group_norm_0[grid(16)](buf13, buf7,
primals_6, primals_7, buf8, buf12, 16, 16, XBLOCK=8, num_warps=
2, num_stages=1)
del primals_7
buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = buf19
del buf19
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_add_elu_native_group_norm_1[grid(16)](buf20, buf14,
primals_9, primals_10, buf6, buf15, buf18, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_10
return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8,
primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64,
16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf7,
reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(
buf12, (4, 4), (4, 1), 0), buf13, buf14, reinterpret_tensor(buf15,
(4, 4), (4, 1), 0), reinterpret_tensor(buf18, (4, 4), (4, 1), 0), buf20
)
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class ExtResNetBlockNew(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'cge', num_groups=8, **kwargs):
super(ExtResNetBlockNew, self).__init__()
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=n_order, num_groups=num_groups)
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_3 = self.conv1.groupnorm.weight
primals_4 = self.conv1.groupnorm.bias
primals_5 = self.conv2.conv.weight
primals_6 = self.conv2.groupnorm.weight
primals_7 = self.conv2.groupnorm.bias
primals_8 = self.conv3.conv.weight
primals_9 = self.conv3.groupnorm.weight
primals_10 = self.conv3.groupnorm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
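# Hypothetical smoke test (not part of the original file): the compiled wrapper is a
# drop-in for ExtResNetBlock on CUDA, e.g.
#   block = ExtResNetBlockNew(in_channels=4, out_channels=4).cuda()
#   y = block(torch.rand(4, 4, 4, 4, device='cuda'))  # output keeps the [4, 4, 4, 4] shape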
| WdBlink/AugMix-3DOCUNet-Brats2019 | ExtResNetBlock | false | 5,972 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import torch
from torch import nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1, stride=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias, stride=stride)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super().__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class Model(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init
# ... truncated (>4000 chars) for memory efficiency |
LeNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 1176)
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_4 => convolution_1
# x_5 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_6 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_8 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_9 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (6, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (6, ), (1, ))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84, ), (1, ))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 18816, grid=grid(18816), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_2, primals_4, primals_1, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.utils
class LeNet(torch.nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 6, kernel_size=5, padding=2)
self.conv2 = torch.nn.Conv2d(6, 16, kernel_size=5)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.flatten = nn.Flatten()
self.fc1 = torch.nn.Linear(16 * 5 * 5, 120)
self.fc2 = torch.nn.Linear(120, 84)
self.fc3 = torch.nn.Linear(84, 10)
def forward(self, x):
x = x.reshape(-1, 1, 28, 28)
x = self.conv1(x)
x = F.relu(x)
x = self.max_pooling(x)
x = self.conv2(x)
x = F.relu(x)
x = self.max_pooling(x)
x = x.view(x.size()[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
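# Hypothetical usage (not part of the original file):
#   model = LeNet()
#   logits = model(get_inputs()[0])  # -> tensor of shape [4, 10]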
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
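# The kernels below cover the parts of LeNet.forward that are not dispatched to the
# extern convolution / matmul kernels: bias-add + ReLU after each convolution
# (kernels 0 and 2), 2x2 stride-2 max pooling with argmax indices (kernels 1 and 3),
# and bias-add + ReLU after the first two fully connected layers (kernels 4 and 5).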
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
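# Max pooling over the first conv's 28x28 maps: each of the 4704 programs
# (4 images x 6 channels x 14 x 14 outputs) loads its 2x2 window at offsets
# 0, 1, 28, 29, stores the maximum, and records which of the four taps won as an
# int8 code (0-3) for the pooling backward. The per-image strides of the value and
# index buffers are padded to 1184 and 1280 elements respectively.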
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
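# Note (added): call() below stitches the compiled LeNet forward pass together:
# extern convolution kernels plus the fused bias+ReLU and max-pool Triton
# kernels above, then three GEMMs for the classifier head (fc1/fc2 bias+ReLU
# handled by the small fused kernels, fc3 as a single addmm).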
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (6, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (6,), (1,))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_3,
18816, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_2, primals_4, primals_1, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class LeNetNew(torch.nn.Module):
def __init__(self):
super(LeNetNew, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 6, kernel_size=5, padding=2)
self.conv2 = torch.nn.Conv2d(6, 16, kernel_size=5)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.flatten = nn.Flatten()
self.fc1 = torch.nn.Linear(16 * 5 * 5, 120)
self.fc2 = torch.nn.Linear(120, 84)
self.fc3 = torch.nn.Linear(84, 10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| WingFeiTsang/FedML_New | LeNet | false | 5,973 | [
"Apache-2.0"
] | 1 | 755d8fc63ce08df4dc3eef326aa7693e94262c7e | https://github.com/WingFeiTsang/FedML_New/tree/755d8fc63ce08df4dc3eef326aa7693e94262c7e | import torch
from torch import nn
import torch.nn.functional as F
import torch.utils
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 6, kernel_size=5, padding=2)
self.conv2 = torch.nn.Conv2d(6, 16, kernel_size=5)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.flatten = nn.Flatten()
self.fc1 = torch.nn.Linear(16 * 5 * 5, 120)
self.fc2 = torch.nn.Linear(120, 84)
self.fc3 = torch.nn.Linear(84, 10)
def forward(self, x):
x = x.reshape(-1, 1, 28, 28)
x = self.conv1(x)
x = F.relu(x)
x = self.max_pooling(x)
x = self.conv2(x)
x = F.relu(x)
x = self.max_pooling(x)
x = x.view(x.size()[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return []
|
LinearCapsPro | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sg/csgojewz7ivvkqgz2youcxebnnlwakpwuik2fyzzax45hdygrm6r.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.0001), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0001
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qs/cqsplu7aho4zbmtum5lhgvbgqxswrhhcfixbbw62plejld3qzbbs.py
# Topologically Sorted Source Nodes: [sigma_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# sigma_4 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %unsqueeze_3],), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp0 >= tmp8
tmp13 = tmp12 & tmp7
tmp14 = tl.load(in_ptr1 + (x0), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp11, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp7, tmp15, tmp16)
tmp18 = tmp0 >= tmp5
tmp19 = tmp18 & tmp4
tmp20 = tl.load(in_ptr2 + (x0), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.where(tmp6, tmp17, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tmp0 >= tmp3
tmp25 = tl.full([1], 4, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tl.load(in_ptr3 + (x0), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.where(tmp4, tmp23, tmp27)
tl.store(out_ptr0 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uw/cuwlkalong3cwuxng3zurtt3tgbk7ithqpljqcehvo5fjqbtfwrm.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out_2 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kl/ckllf6mazqhprwh2tau7vxrdurc2kivba4tykq3gqxvebmdet63s.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/77/c77uzurrxe65dwxrucifwcyja4kctf3y6ir7z3x44ooratb5zyqo.py
# Topologically Sorted Source Nodes: [sqrt], Original ATen: [aten.sqrt]
# Source node to ATen node mapping:
# sqrt => sqrt
# Graph fragment:
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%squeeze_1,), kwargs = {})
triton_poi_fused_sqrt_4 = async_compile.triton('triton_poi_fused_sqrt_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sqrt_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sqrt_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.sqrt(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
# Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.linalg_inv_ex]
buf2 = torch.ops.aten.linalg_inv_ex.default(buf1)
buf3 = buf2[0]
del buf2
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf5)
# Topologically Sorted Source Nodes: [sigma_], Original ATen: [aten.linalg_inv_ex]
buf6 = torch.ops.aten.linalg_inv_ex.default(buf5)
buf7 = buf6[0]
del buf6
buf9 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf9)
# Topologically Sorted Source Nodes: [sigma__2], Original ATen: [aten.linalg_inv_ex]
buf10 = torch.ops.aten.linalg_inv_ex.default(buf9)
buf11 = buf10[0]
del buf10
buf13 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_1, (4, 4), (1, 4), 48), alpha=1, beta=1, out=buf13)
del buf0
# Topologically Sorted Source Nodes: [sigma__4], Original ATen: [aten.linalg_inv_ex]
buf14 = torch.ops.aten.linalg_inv_ex.default(buf13)
buf15 = buf14[0]
del buf14
buf17 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sigma_4], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf3, buf7, buf11, buf15, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf17, buf19, 256, grid=grid(256), stream=stream0)
buf20 = reinterpret_tensor(buf17, (16, 1, 4), (4, 4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(primals_1, buf21, 256, grid=grid(256), stream=stream0)
buf22 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf20, reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), out=buf22)
buf23 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 1), (4, 1, 1), 0), out=buf23)
del buf22
buf24 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [sqrt], Original ATen: [aten.sqrt]
triton_poi_fused_sqrt_4.run(buf24, 16, grid=grid(16), stream=stream0)
return (buf24, primals_3, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf24, reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
class LinearCapsPro(nn.Module):
def __init__(self, in_features, num_C, num_D, eps=0.0001):
super(LinearCapsPro, self).__init__()
self.in_features = in_features
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.weight = Parameter(torch.Tensor(num_C * num_D, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, x, eye):
weight_caps = self.weight[:self.num_D]
sigma = torch.inverse(torch.mm(weight_caps, torch.t(weight_caps)) +
self.eps * eye)
sigma = torch.unsqueeze(sigma, dim=0)
for ii in range(1, self.num_C):
weight_caps = self.weight[ii * self.num_D:(ii + 1) * self.num_D]
sigma_ = torch.inverse(torch.mm(weight_caps, torch.t(
weight_caps)) + self.eps * eye)
sigma_ = torch.unsqueeze(sigma_, dim=0)
sigma = torch.cat((sigma, sigma_))
out = torch.matmul(x, torch.t(self.weight))
out = out.view(out.shape[0], self.num_C, 1, self.num_D)
out = torch.matmul(out, sigma)
out = torch.matmul(out, self.weight.view(self.num_C, self.num_D,
self.in_features))
out = torch.squeeze(out, dim=2)
out = torch.matmul(out, torch.unsqueeze(x, dim=2))
out = torch.squeeze(out, dim=2)
return torch.sqrt(out)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'num_C': 4, 'num_D': 4}]
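# Illustrative usage sketch (added). Assumption: the second forward() argument
# is meant to be a num_D x num_D identity matrix, which is how the
# `self.eps * eye` regularizer in forward() reads; get_inputs() above only
# supplies a random placeholder of the same shape.
if __name__ == "__main__":
    _caps = LinearCapsPro(in_features=4, num_C=4, num_D=4)
    _lengths = _caps(torch.rand(4, 4), torch.eye(4))
    assert _lengths.shape == (4, 4)  # one capsule length per (sample, capsule)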
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0001
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + x0, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp0 >= tmp8
tmp13 = tmp12 & tmp7
tmp14 = tl.load(in_ptr1 + x0, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp11, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp7, tmp15, tmp16)
tmp18 = tmp0 >= tmp5
tmp19 = tmp18 & tmp4
tmp20 = tl.load(in_ptr2 + x0, tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.where(tmp6, tmp17, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp27 = tl.load(in_ptr3 + x0, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tl.where(tmp4, tmp23, tmp27)
tl.store(out_ptr0 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_sqrt_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.sqrt(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
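# Note (added): call() below mirrors LinearCapsPro.forward. triton_poi_fused_mul_0
# scales the eye input by eps=1e-4; each addmm then forms W_c @ W_c.T + eps*eye
# for one of the four capsule blocks, linalg_inv_ex inverts it, the results are
# concatenated, and the remaining mm/bmm chain reproduces the projection before
# the final elementwise sqrt.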
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4,
1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf1)
buf2 = torch.ops.aten.linalg_inv_ex.default(buf1)
buf3 = buf2[0]
del buf2
buf5 = buf1
del buf1
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4,
1), 16), reinterpret_tensor(primals_1, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf5)
buf6 = torch.ops.aten.linalg_inv_ex.default(buf5)
buf7 = buf6[0]
del buf6
buf9 = buf5
del buf5
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4,
1), 32), reinterpret_tensor(primals_1, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf9)
buf10 = torch.ops.aten.linalg_inv_ex.default(buf9)
buf11 = buf10[0]
del buf10
buf13 = buf9
del buf9
extern_kernels.addmm(buf0, reinterpret_tensor(primals_1, (4, 4), (4,
1), 48), reinterpret_tensor(primals_1, (4, 4), (1, 4), 48),
alpha=1, beta=1, out=buf13)
del buf0
buf14 = torch.ops.aten.linalg_inv_ex.default(buf13)
buf15 = buf14[0]
del buf14
buf17 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_cat_1[grid(64)](buf3, buf7, buf11, buf15, buf17,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16),
(1, 4), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf17, buf19, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf17, (16, 1, 4), (4, 4, 1), 0)
del buf17
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 1, 4), (4, 4, 1),
0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20
)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(256)](primals_1, buf21, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf22 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(buf20, reinterpret_tensor(buf21, (16, 4, 4), (16,
4, 1), 0), out=buf22)
buf23 = reinterpret_tensor(buf13, (4, 4, 1), (4, 1, 1), 0)
del buf13
extern_kernels.bmm(reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_3, (4, 4, 1), (4, 1, 1), 0), out
=buf23)
del buf22
buf24 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0)
del buf23
triton_poi_fused_sqrt_4[grid(16)](buf24, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf24, primals_3, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48
), buf24, reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf21, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), reinterpret_tensor(buf7, (4, 4), (4, 1), 0
), reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
class LinearCapsProNew(nn.Module):
def __init__(self, in_features, num_C, num_D, eps=0.0001):
super(LinearCapsProNew, self).__init__()
self.in_features = in_features
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.weight = Parameter(torch.Tensor(num_C * num_D, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| WdBlink/AugMix-3DOCUNet-Brats2019 | LinearCapsPro | false | 5,974 | [
"MIT"
] | 1 | 125c6c8682b51a550eeac9173d13d0a211576abc | https://github.com/WdBlink/AugMix-3DOCUNet-Brats2019/tree/125c6c8682b51a550eeac9173d13d0a211576abc | import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, in_features, num_C, num_D, eps=0.0001):
super().__init__()
self.in_features = in_features
self.num_C = num_C
self.num_D = num_D
self.eps = eps
self.weight = Parameter(torch.Tensor(num_C * num_D, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, x, eye):
weight_caps = self.weight[:self.num_D]
sigma = torch.inverse(torch.mm(weight_caps, torch.t(weight_caps)) +
self.eps * eye)
sigma = torch.unsqueeze(sigma, dim=0)
for ii in range(1, self.num_C):
weight_caps = self.weight[ii * self.num_D:(ii + 1) * self.num_D]
sigma_ = torch.inverse(torch.mm(weight_caps, torch.t(
weight_caps)) + self.eps * eye)
sigma_ = torch.unsqueeze(sigma_, dim=0)
sigma = torch.cat((sigma, sigma_))
out = torch.matmul(x, torch.t(self.weight))
out = out.view(out.shape[0], self.num_C, 1, self.num_D)
out = torch.matmul(out, sigma)
out = torch.matmul(out, self.weight.view(self.num_C, self.num_D,
self.in_features))
out = torch.squeeze(out, dim=2)
out = torch.matmul(out, torch.unsqueeze(x, dim=2))
out = torch.squeeze(out, dim=2)
return torch.sqrt(out)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ConvertPointsFromHomogeneous | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hf/chfgbza2ejy2u6zh4sp5dzw6tvq6mxgnlub6k2hbye722pxlzgxg.py
# Topologically Sorted Source Nodes: [add, truediv], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_1, %add), kwargs = {})
triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = 1e-06
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, truediv], Original ATen: [aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def convert_points_from_homogeneous(points, eps=1e-06):
"""Function that converts points from homogeneous to Euclidean space.
See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_from_homogeneous(input) # BxNx2
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return points[..., :-1] / (points[..., -1:] + eps)
class ConvertPointsFromHomogeneous(nn.Module):
"""Creates a transformation that converts points from homogeneous to
Euclidean space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N-1-dimensional points.
Shape:
- Input: :math:`(B, D, N)` or :math:`(D, N)`
        - Output: :math:`(B, D, N - 1)` or :math:`(D, N - 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsFromHomogeneous()
>>> output = transform(input) # BxNx2
"""
def __init__(self):
super(ConvertPointsFromHomogeneous, self).__init__()
def forward(self, input):
return convert_points_from_homogeneous(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
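# Worked example (added for illustration): dividing the leading coordinates by
# the last one maps a homogeneous point to Euclidean space, and eps only guards
# against division by zero, so [2, 4, 2] maps to approximately [1, 2].
if __name__ == "__main__":
    _pts = torch.tensor([[2.0, 4.0, 2.0]])
    _euc = convert_points_from_homogeneous(_pts)
    assert torch.allclose(_euc, torch.tensor([[1.0, 2.0]]), atol=1e-4)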
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = 1e-06
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
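# Note (added): the fused kernel above computes points[..., :3] / (points[..., 3:] + 1e-06)
# over the fixed (4, 4, 4, 4) input; call() only allocates the (4, 4, 4, 3)
# output and launches it.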
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def convert_points_from_homogeneous(points, eps=1e-06):
"""Function that converts points from homogeneous to Euclidean space.
See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_from_homogeneous(input) # BxNx2
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return points[..., :-1] / (points[..., -1:] + eps)
class ConvertPointsFromHomogeneousNew(nn.Module):
"""Creates a transformation that converts points from homogeneous to
Euclidean space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N-1-dimensional points.
Shape:
- Input: :math:`(B, D, N)` or :math:`(D, N)`
        - Output: :math:`(B, D, N - 1)` or :math:`(D, N - 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsFromHomogeneous()
>>> output = transform(input) # BxNx2
"""
def __init__(self):
super(ConvertPointsFromHomogeneousNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Wizaron/torchgeometry | ConvertPointsFromHomogeneous | false | 5,975 | [
"Apache-2.0"
] | 1 | 59a8d25dd811ded6a139d5c0c2442b06f43dc775 | https://github.com/Wizaron/torchgeometry/tree/59a8d25dd811ded6a139d5c0c2442b06f43dc775 | import torch
import torch.nn as nn
def convert_points_from_homogeneous(points, eps=1e-06):
"""Function that converts points from homogeneous to Euclidean space.
See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_from_homogeneous(input) # BxNx2
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return points[..., :-1] / (points[..., -1:] + eps)
class Model(nn.Module):
"""Creates a transformation that converts points from homogeneous to
Euclidean space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N-1-dimensional points.
Shape:
- Input: :math:`(B, D, N)` or :math:`(D, N)`
        - Output: :math:`(B, D, N - 1)` or :math:`(D, N - 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsFromHomogeneous()
>>> output = transform(input) # BxNx2
"""
def __init__(self):
super().__init__()
def forward(self, input):
return convert_points_from_homogeneous(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CPUForgetMult | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2s/c2srlvu3ig3l4rt4br537nskxrm55ugw7myg2outhdk64rd63rae.py
# Topologically Sorted Source Nodes: [sub_1, mul_2, h_3, stack], Original ATen: [aten.rsub, aten.mul, aten.add, aten.stack]
# Source node to ATen node mapping:
# h_3 => add_1
# mul_2 => mul_2
# stack => cat
# sub_1 => sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, %mul_2), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3],), kwargs = {})
triton_poi_fused_add_mul_rsub_stack_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_stack_0(in_ptr0, in_ptr1, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp1 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp6 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr0 + (x0), xmask)
tmp10 = tl.load(in_ptr1 + (x0), xmask)
tmp16 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp17 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp7 = tmp5 * tmp6
tmp8 = tmp3 - tmp5
tmp11 = tmp9 * tmp10
tmp12 = tmp8 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp4 * tmp13
tmp15 = tmp2 + tmp14
tmp18 = tmp16 * tmp17
tmp19 = tmp3 - tmp16
tmp20 = tmp19 * tmp15
tmp21 = tmp18 + tmp20
tl.store(out_ptr1 + (x0), tmp13, xmask)
tl.store(out_ptr2 + (x0), tmp11, xmask)
tl.store(out_ptr3 + (x0), tmp15, xmask)
tl.store(out_ptr4 + (x0), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 64) # alias
buf1 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) # alias
buf3 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 128) # alias
buf4 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 192) # alias
# Topologically Sorted Source Nodes: [sub_1, mul_2, h_3, stack], Original ATen: [aten.rsub, aten.mul, aten.add, aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_stack_0.run(arg0_1, arg1_1, buf2, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import *
class CPUForgetMult(torch.nn.Module):
def __init__(self):
super(CPUForgetMult, self).__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)
prev_h = hidden_init
for i, h in enumerate((f * x).split(1, dim=0)):
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
h = h.view(h.size()[1:])
result.append(h)
prev_h = h
return torch.stack(result)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
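# Reference sketch (added for illustration): the loop in forward() realizes the
# elementwise gated recurrence h_t = f_t * x_t + (1 - f_t) * h_{t-1} along
# dim 0, with h_0 = f_0 * x_0 when hidden_init is None; the naive loop below
# should match the module's final step.
if __name__ == "__main__":
    _f, _x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    _out = CPUForgetMult()(_f, _x)
    _h = _f[0] * _x[0]
    for _t in range(1, 4):
        _h = _f[_t] * _x[_t] + (1 - _f[_t]) * _h
    assert torch.allclose(_out[-1], _h)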
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_stack_0(in_ptr0, in_ptr1, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp1 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp6 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr0 + x0, xmask)
tmp10 = tl.load(in_ptr1 + x0, xmask)
tmp16 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp17 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp7 = tmp5 * tmp6
tmp8 = tmp3 - tmp5
tmp11 = tmp9 * tmp10
tmp12 = tmp8 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp4 * tmp13
tmp15 = tmp2 + tmp14
tmp18 = tmp16 * tmp17
tmp19 = tmp3 - tmp16
tmp20 = tmp19 * tmp15
tmp21 = tmp18 + tmp20
tl.store(out_ptr1 + x0, tmp13, xmask)
tl.store(out_ptr2 + x0, tmp11, xmask)
tl.store(out_ptr3 + x0, tmp15, xmask)
tl.store(out_ptr4 + x0, tmp21, xmask)
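# Note (added): the fused kernel above unrolls all four steps of the recurrence
# h_t = f_t * x_t + (1 - f_t) * h_{t-1} in one launch, writing each step into
# its slice of the stacked output; call() aliases those slices into a single
# (4, 4, 4, 4) buffer.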
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 64)
buf1 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
buf3 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 128)
buf4 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 192)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_stack_0[grid(64)](arg0_1, arg1_1,
buf2, buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class CPUForgetMultNew(torch.nn.Module):
def __init__(self):
super(CPUForgetMultNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| WittmannF/fastai_docs | CPUForgetMult | false | 5,976 | [
"Apache-2.0"
] | 1 | 03ecae01557a5e4a196dd858b10a57b224df52cd | https://github.com/WittmannF/fastai_docs/tree/03ecae01557a5e4a196dd858b10a57b224df52cd | import torch
from typing import *
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)
prev_h = hidden_init
for i, h in enumerate((f * x).split(1, dim=0)):
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
h = h.view(h.size()[1:])
result.append(h)
prev_h = h
return torch.stack(result)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LearnedPositionalEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5i/c5iybmnijeaxq3pumkl5crtkns462pwdrh72bxy4lcvnlh3r4364.py
# Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# mask => convert_element_type
# ne => ne
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%convert_element_type, 1), kwargs = {})
triton_per_fused__to_copy_cumsum_ne_0 = async_compile.triton('triton_per_fused__to_copy_cumsum_ne_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_cumsum_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ft/cftxaavy7b7scxgnrhfsvfnicvimxnf3kpckow5nzkyed3meyoli.py
# Topologically Sorted Source Nodes: [ne, mask, type_as, mul, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add]
# Source node to ATen node mapping:
# long => convert_element_type_2
# mask => convert_element_type
# mul => mul
# ne => ne
# positions => add
# type_as => convert_element_type_1
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%primals_1, 4), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int32), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%cumsum, torch.int32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %convert_element_type), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul, torch.int64), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 4), kwargs = {})
triton_poi_fused__to_copy_add_mul_ne_1 = async_compile.triton('triton_poi_fused__to_copy_add_mul_ne_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_mul_ne_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/in/cinglqnf6mtochspmiolvr3bqay6yiivzgqlihpkdlbd5p4ccw54.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %add, 4), kwargs = {})
triton_poi_fused_embedding_2 = async_compile.triton('triton_poi_fused_embedding_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 9)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 9")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [ne, mask, cumsum], Original ATen: [aten.ne, aten._to_copy, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0.run(primals_1, buf0, 64, 4, grid=grid(64), stream=stream0)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [ne, mask, type_as, mul, long, positions], Original ATen: [aten.ne, aten._to_copy, aten.mul, aten.add]
triton_poi_fused__to_copy_add_mul_ne_1.run(buf1, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_2.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_2
return (buf2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((9, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
if padding_idx is not None:
num_embeddings_ = num_embeddings + padding_idx + 1
else:
num_embeddings_ = num_embeddings
super().__init__(num_embeddings_, embedding_dim, padding_idx)
self.max_positions = num_embeddings
def forward(self, input: 'torch.Tensor'):
"""Input is expected to be of size [bsz x seqlen]."""
if input.size(1) > self.max_positions:
raise ValueError(
f'Sequence length {input.size(1)} above maximum sequence length of {self.max_positions}'
)
mask = input.ne(self.padding_idx).int()
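        # Cumulative count of non-pad tokens gives 1-based positions; multiplying by
        # the mask zeroes pad slots, and adding padding_idx shifts real positions past
        # the reserved padding embedding.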
positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long(
) + self.padding_idx
return F.embedding(positions, self.weight, self.padding_idx, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
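    # Builds the non-padding mask (input != padding_idx, hard-coded to 4 here) and
    # its cumulative sum along the sequence axis via an associative scan of length 4.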
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
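    # Finishes positions = cumsum(mask) * mask + padding_idx in place on the int64
    # buffer produced by the scan kernel above.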
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
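    # Wrap negative indices and bounds-check against the 9-row embedding table
    # (num_embeddings + padding_idx + 1) before gathering the embedding row.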
tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask,
'index out of bounds: 0 <= tmp4 < 9')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0, 64,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = buf0
del buf0
triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, buf1
class LearnedPositionalEmbeddingNew(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
if padding_idx is not None:
num_embeddings_ = num_embeddings + padding_idx + 1
else:
num_embeddings_ = num_embeddings
super().__init__(num_embeddings_, embedding_dim, padding_idx)
self.max_positions = num_embeddings
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| William-Zhanng/Protein_affinity | LearnedPositionalEmbedding | false | 5,977 | [
"MIT"
] | 1 | 8abd12073b182274bf464ff23fd3be406c4e39ac | https://github.com/William-Zhanng/Protein_affinity/tree/8abd12073b182274bf464ff23fd3be406c4e39ac | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
if padding_idx is not None:
num_embeddings_ = num_embeddings + padding_idx + 1
else:
num_embeddings_ = num_embeddings
super().__init__(num_embeddings_, embedding_dim, padding_idx)
self.max_positions = num_embeddings
def forward(self, input: 'torch.Tensor'):
"""Input is expected to be of size [bsz x seqlen]."""
if input.size(1) > self.max_positions:
raise ValueError(
f'Sequence length {input.size(1)} above maximum sequence length of {self.max_positions}'
)
mask = input.ne(self.padding_idx).int()
positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long(
) + self.padding_idx
return F.embedding(positions, self.weight, self.padding_idx, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
AdaptiveConcatPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k4/ck4n5tdf6davbcws4dl4srbcvwr32b7mii5iws2ikdlxcnct5azp.py
# Topologically Sorted Source Nodes: [adaptive_max_pool2d], Original ATen: [aten.adaptive_max_pool2d]
# Source node to ATen node mapping:
# adaptive_max_pool2d => adaptive_max_pool2d
# Graph fragment:
# %adaptive_max_pool2d : [num_users=1] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%arg0_1, [1, 1]), kwargs = {})
triton_poi_fused_adaptive_max_pool2d_0 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bq/cbq374nmg2wjieph77t53feytfu37kp7eyuymtfg6in2myzkzehm.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool2d => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_1 = async_compile.triton('triton_per_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (x2 + (8*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0) # alias
# Topologically Sorted Source Nodes: [adaptive_max_pool2d], Original ATen: [aten.adaptive_max_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4) # alias
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
triton_per_fused_mean_1.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from typing import *
from typing import Optional
class AdaptiveConcatPool2d(nn.Module):
"""Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`"""
def __init__(self, sz: 'Optional[int]'=None):
"""Output will be 2*sz or 2 if sz is None"""
super().__init__()
sz = sz or 1
self.ap, self.mp = nn.AdaptiveAvgPool2d(sz), nn.AdaptiveMaxPool2d(sz)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
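# Illustrative example (assumption, not from the original source): for x of shape
# (4, 4, 4, 4), the output has shape (4, 8, 1, 1) -- channels 0..3 hold the
# max-pooled features and channels 4..7 the average-pooled ones.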
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from typing import *
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4)
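        # buf0 and buf2 are channel-offset views of buf3, so the max- and mean-pool
        # kernels fill the concatenated output directly without a separate torch.cat.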
triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return buf3,
class AdaptiveConcatPool2dNew(nn.Module):
"""Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`"""
def __init__(self, sz: 'Optional[int]'=None):
"""Output will be 2*sz or 2 if sz is None"""
super().__init__()
sz = sz or 1
self.ap, self.mp = nn.AdaptiveAvgPool2d(sz), nn.AdaptiveMaxPool2d(sz)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| WittmannF/fastai_docs | AdaptiveConcatPool2d | false | 5,978 | [
"Apache-2.0"
] | 1 | 03ecae01557a5e4a196dd858b10a57b224df52cd | https://github.com/WittmannF/fastai_docs/tree/03ecae01557a5e4a196dd858b10a57b224df52cd | import torch
from torch import nn
from typing import *
from typing import Optional
class Model(nn.Module):
"""Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`"""
def __init__(self, sz: 'Optional[int]'=None):
"""Output will be 2*sz or 2 if sz is None"""
super().__init__()
sz = sz or 1
self.ap, self.mp = nn.AdaptiveAvgPool2d(sz), nn.AdaptiveMaxPool2d(sz)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Normalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/74/c74cgsnsrc4jmxl2qqe5j2mmuphscf6e56iqf2iy24pro6mj73sp.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.datasets import *
import torch.nn.functional as F
import torch.nn as nn
from torchvision.transforms import *
class Normalize(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super(Normalize, self).__init__()
self.p = p
self.dim = dim
def forward(self, x):
return F.normalize(x, self.p, self.dim, eps=1e-08)
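# Worked check (illustrative assumption, not from the original source): for
# v = torch.tensor([[3.0, 4.0]]) with p=2, dim=1, the result is
# v / max(||v||_2, eps) = [[0.6, 0.8]].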
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
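    # x0 picks the spatial position within a channel and x2 the batch index; the four
    # stride-16 loads gather the channel values whose squared sum forms the L2 norm.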
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super(NormalizeNew, self).__init__()
self.p = p
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Womcos/SCARF | Normalize | false | 5,979 | [
"MIT"
] | 1 | b90251bc23410cb810a7082ca75147a7aae21dec | https://github.com/Womcos/SCARF/tree/b90251bc23410cb810a7082ca75147a7aae21dec | import torch
from torchvision.datasets import *
import torch.nn.functional as F
import torch.nn as nn
from torchvision.transforms import *
class Model(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super().__init__()
self.p = p
self.dim = dim
def forward(self, x):
return F.normalize(x, self.p, self.dim, eps=1e-08)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Mean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k7/ck7f3e36x4bp7ysaeucdkbkabvflugky7lt72frthtbqzwdsmcfq.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [4]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
class Mean(nn.Module):
def __init__(self, dim, keep_dim=False):
super(Mean, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.mean(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
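    # Averages the 4 contiguous elements along the last axis for each of the 256
    # output positions (mean over dim=4 of a (4, 4, 4, 4, 4) input).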
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MeanNew(nn.Module):
def __init__(self, dim, keep_dim=False):
super(MeanNew, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Womcos/SCARF | Mean | false | 5,980 | [
"MIT"
] | 1 | b90251bc23410cb810a7082ca75147a7aae21dec | https://github.com/Womcos/SCARF/tree/b90251bc23410cb810a7082ca75147a7aae21dec | import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
class Model(nn.Module):
def __init__(self, dim, keep_dim=False):
super().__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.mean(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Sum | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wq/cwq3pmymyuowy4xq7gkn2nwrwrq6bpjfz67zohrfhdkptqayuc4m.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [4]), kwargs = {})
triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
class Sum(nn.Module):
def __init__(self, dim, keep_dim=False):
super(Sum, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.sum(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
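    # Sums the 4 contiguous elements along the last axis for each of the 256 output
    # positions (sum over dim=4 of a (4, 4, 4, 4, 4) input).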
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SumNew(nn.Module):
def __init__(self, dim, keep_dim=False):
super(SumNew, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
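# Minimal sanity-check sketch (assumes a CUDA device and the `SumNew`
# definition above): the compiled wrapper should match eager `input.sum(4)`
# for the fixed (4, 4, 4, 4, 4) input shape the kernel asserts on.
if torch.cuda.is_available():
    _x = torch.rand(4, 4, 4, 4, 4, device='cuda')
    _out = SumNew(dim=4)(_x)
    assert _out.shape == (4, 4, 4, 4)
    assert torch.allclose(_out, _x.sum(4), atol=1e-6)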
| Womcos/SCARF | Sum | false | 5,981 | ["MIT"] | 1 | b90251bc23410cb810a7082ca75147a7aae21dec | https://github.com/Womcos/SCARF/tree/b90251bc23410cb810a7082ca75147a7aae21dec | import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
class Model(nn.Module):
def __init__(self, dim, keep_dim=False):
super().__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.sum(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
InvDepth | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/w7/cw7ogushagw5rd6eixmm5u3erviyy7hf33356b3glzcg42jbcwxp.py
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_1, 0.04), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2.0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%primals_1, 0.04), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%primals_1, 2.0), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {})
triton_poi_fused_clamp_ge_le_logical_and_0 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.04
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del primals_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class InvDepth(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super(InvDepth, self).__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
return self.w.clamp(min=self._min_range, max=self._max_range)
def get_inputs():
return []
def get_init_inputs():
return [[], {'height': 4, 'width': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.04
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(16)](primals_1,
buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, buf1
class InvDepthNew(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super(InvDepthNew, self).__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
primals_1 = self.w
output = call([primals_1])
return output[0]
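# Minimal sanity-check sketch (assumes a CUDA device and the `InvDepthNew`
# definition above): with the default depth range (min_depth=0.5,
# max_depth=25.0) the clamp bounds baked into the kernel are 1/25 = 0.04 and
# 1/0.5 = 2.0, so the output should equal clamping the parameter directly.
if torch.cuda.is_available():
    _m = InvDepthNew(4, 4).cuda()
    _out = _m()
    assert _out.shape == (1, 1, 4, 4)
    assert torch.allclose(_out, _m.w.clamp(min=0.04, max=2.0))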
| Wizaron/torchgeometry | InvDepth | false | 5,982 | ["Apache-2.0"] | 1 | 59a8d25dd811ded6a139d5c0c2442b06f43dc775 | https://github.com/Wizaron/torchgeometry/tree/59a8d25dd811ded6a139d5c0c2442b06f43dc775 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super().__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
return self.w.clamp(min=self._min_range, max=self._max_range)
def get_inputs():
return []
def get_init_inputs():
return [4, 4]
|
ActivationBin | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vt/cvt5va4h5q4vsog6u4y2lbtpnebzhbv2cs6oarhmehuoszkkjfsd.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# output => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %relu), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr1 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0)
return (arg0_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
class BinaryActivation(Function):
@staticmethod
def forward(self, input):
self.save_for_backward(input)
output = torch.sign(input)
return output
@staticmethod
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input.ge(1.0)] = 0
grad_input[input.le(-1.0)] = 0
"""
#******************soft_ste*****************
size = input.size()
zeros = torch.zeros(size).cuda()
grad = torch.max(zeros, 1 - torch.abs(input))
#print(grad)
grad_input = grad_output * grad
"""
return grad_input
class ActivationBin(nn.Module):
def __init__(self, A):
super(ActivationBin, self).__init__()
self.A = A
self.relu = nn.ReLU(inplace=True)
def binary(self, input):
output = BinaryActivation.apply(input)
return output
def forward(self, input):
if self.A == 2:
output = self.binary(input)
else:
output = self.relu(input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'A': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr1 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return arg0_1,
class BinaryActivation(Function):
@staticmethod
def forward(self, input):
self.save_for_backward(input)
output = torch.sign(input)
return output
@staticmethod
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input.ge(1.0)] = 0
grad_input[input.le(-1.0)] = 0
"""
#******************soft_ste*****************
size = input.size()
zeros = torch.zeros(size).cuda()
grad = torch.max(zeros, 1 - torch.abs(input))
#print(grad)
grad_input = grad_output * grad
"""
return grad_input
class ActivationBinNew(nn.Module):
def __init__(self, A):
super(ActivationBinNew, self).__init__()
self.A = A
self.relu = nn.ReLU(inplace=True)
def binary(self, input):
output = BinaryActivation.apply(input)
return output
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
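# Minimal sanity-check sketch (assumes a CUDA device and the
# `ActivationBinNew` definition above): with A != 2 the module reduces to an
# in-place ReLU, so the fused kernel should agree with torch.relu. The kernel
# writes into its input buffer, hence the clone.
if torch.cuda.is_available():
    _x = torch.randn(4, 4, 4, 4, device='cuda')
    _out = ActivationBinNew(A=4)(_x.clone())
    assert torch.allclose(_out, torch.relu(_x))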
| Wulingtian/micronet | ActivationBin | false | 5,983 | ["MIT"] | 1 | d04298bced90258d38a6455a743aa0b55a12852e | https://github.com/Wulingtian/micronet/tree/d04298bced90258d38a6455a743aa0b55a12852e | from torch.autograd import Function
import torch
import torch.nn as nn
class BinaryActivation(Function):
@staticmethod
def forward(self, input):
self.save_for_backward(input)
output = torch.sign(input)
return output
@staticmethod
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input.ge(1.0)] = 0
grad_input[input.le(-1.0)] = 0
"""
#******************soft_ste*****************
size = input.size()
zeros = torch.zeros(size).cuda()
grad = torch.max(zeros, 1 - torch.abs(input))
#print(grad)
grad_input = grad_output * grad
"""
return grad_input
class Model(nn.Module):
def __init__(self, A):
super().__init__()
self.A = A
self.relu = nn.ReLU(inplace=True)
def binary(self, input):
output = BinaryActivation.apply(input)
return output
def forward(self, input):
if self.A == 2:
output = self.binary(input)
else:
output = self.relu(input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
UpsampleConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
from torchvision.datasets import *
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *
class UpsampleConv2d(Module):
"""
To avoid the checkerboard artifacts of standard Fractionally-strided Convolution,
    we adopt an integer-stride convolution that produces a :math:`2\\times 2` output for
each convolutional window.
.. image:: _static/img/upconv.png
:width: 50%
:align: center
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Zero-padding added to one side of the output.
Default: 0
groups (int, optional): Number of blocked connections from input channels to output
channels. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
scale_factor (int): scaling factor for upsampling convolution. Default: 1
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
:math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(in_channels, scale * scale * out_channels, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels)
Examples:
>>> # With square kernels and equal stride
        >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
super(UpsampleConv2d, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.scale_factor = scale_factor
self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
out = F.conv2d(input, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
return F.pixel_shuffle(out, self.scale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torchvision.datasets import *
from torch.nn.parameter import Parameter
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0
), primals_1, primals_3
class UpsampleConv2dNew(Module):
"""
To avoid the checkerboard artifacts of standard Fractionally-strided Convolution,
    we adopt an integer-stride convolution that produces a :math:`2\\times 2` output for
each convolutional window.
.. image:: _static/img/upconv.png
:width: 50%
:align: center
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Zero-padding added to one side of the output.
Default: 0
groups (int, optional): Number of blocked connections from input channels to output
channels. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
scale_factor (int): scaling factor for upsampling convolution. Default: 1
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
:math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(in_channels, scale * scale * out_channels, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels)
Examples:
>>> # With square kernels and equal stride
        >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
super(UpsampleConv2dNew, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.scale_factor = scale_factor
self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
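# Minimal sanity-check sketch (assumes a CUDA device and the
# `UpsampleConv2dNew` definition above): with the default scale_factor=1 the
# pixel_shuffle step is a no-op, so the compiled module should agree with a
# plain conv2d that uses the module's own weight and bias.
if torch.cuda.is_available():
    import torch.nn.functional as F
    _m = UpsampleConv2dNew(4, 4, 4).cuda()
    _x = torch.rand(4, 4, 4, 4, device='cuda')
    _out = _m(_x)
    assert _out.shape == (4, 4, 1, 1)
    assert torch.allclose(_out, F.conv2d(_x, _m.weight, _m.bias), atol=1e-5)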
| Womcos/SCARF | UpsampleConv2d | false | 5,984 | ["MIT"] | 1 | b90251bc23410cb810a7082ca75147a7aae21dec | https://github.com/Womcos/SCARF/tree/b90251bc23410cb810a7082ca75147a7aae21dec | from torch.nn import Module
import math
import torch
from torchvision.datasets import *
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *
class Model(Module):
"""
To avoid the checkerboard artifacts of standard Fractionally-strided Convolution,
we adapt an integer stride convolution but producing a :math:`2\\times 2` outputs for
each convolutional window.
.. image:: _static/img/upconv.png
:width: 50%
:align: center
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Zero-padding added to one side of the output.
Default: 0
groups (int, optional): Number of blocked connections from input channels to output
channels. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
scale_factor (int): scaling factor for upsampling convolution. Default: 1
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
:math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(in_channels, scale * scale * out_channels, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels)
Examples:
>>> # With square kernels and equal stride
        >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
super().__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.scale_factor = scale_factor
self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter
# ... truncated (>4000 chars) for memory efficiency |
TranLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py
# Topologically Sorted Source Nodes: [aggNodes], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# aggNodes => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/77/c77fhg42q7klen5ufr7gfavdlq2lmbawyinwhesqnyoe7oze7ag6.py
# Topologically Sorted Source Nodes: [aggNodes, relu], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# aggNodes => add
# relu => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/y2/cy2wuqwbyhvfsexr4wzystx4m55dkmgikh4mwi6rivcn3b42elvm.py
# Topologically Sorted Source Nodes: [aggEmbed_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# aggEmbed_1 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [aggNodes], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [aggNodes], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [aggNodes, relu], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf2, primals_3, buf6, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((16, 1), (1, 1), torch.bool)
# Topologically Sorted Source Nodes: [aggEmbed_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf5, 16, grid=grid(16), stream=stream0)
del primals_5
return (buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class TranLayer(nn.Module):
def __init__(self, embed_dim, num_nodes):
super(TranLayer, self).__init__()
self.embed_dim = embed_dim
self.num_nodes = num_nodes
self.linear_nodes = nn.Linear(in_features=self.num_nodes,
out_features=1, bias=True)
self.linear_embed = nn.Linear(in_features=self.embed_dim,
out_features=1, bias=True)
self.reset_parameters()
def reset_parameters(self):
""" Reinitialize learnable parameters. """
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear_nodes.weight, gain=gain)
nn.init.xavier_normal_(self.linear_embed.weight, gain=gain)
def forward(self, embed_feat: 'torch.Tensor'):
reshapedEmbedFeat = torch.transpose(embed_feat, -2, -1)
aggNodes = self.linear_nodes(reshapedEmbedFeat)
aggNodes = F.relu(aggNodes).reshape(-1, self.embed_dim)
del reshapedEmbedFeat
aggEmbed = self.linear_embed(aggNodes)
aggEmbed = F.relu(aggEmbed)
del aggNodes
return aggEmbed
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_nodes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((16, 1), (1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16)](buf4,
primals_5, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf5, primals_4, buf6
class TranLayerNew(nn.Module):
def __init__(self, embed_dim, num_nodes):
super(TranLayerNew, self).__init__()
self.embed_dim = embed_dim
self.num_nodes = num_nodes
self.linear_nodes = nn.Linear(in_features=self.num_nodes,
out_features=1, bias=True)
self.linear_embed = nn.Linear(in_features=self.embed_dim,
out_features=1, bias=True)
self.reset_parameters()
def reset_parameters(self):
""" Reinitialize learnable parameters. """
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear_nodes.weight, gain=gain)
nn.init.xavier_normal_(self.linear_embed.weight, gain=gain)
def forward(self, input_0):
primals_2 = self.linear_nodes.weight
primals_3 = self.linear_nodes.bias
primals_4 = self.linear_embed.weight
primals_5 = self.linear_embed.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
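# Minimal sanity-check sketch (assumes a CUDA device and that both the eager
# `TranLayer` and the compiled `TranLayerNew` classes from this row are in
# scope): the two share parameter names, so the eager state_dict can be loaded
# into the compiled module and the forwards compared on a (4, 4, 4, 4) input.
if torch.cuda.is_available():
    _eager = TranLayer(embed_dim=4, num_nodes=4).cuda()
    _compiled = TranLayerNew(embed_dim=4, num_nodes=4).cuda()
    _compiled.load_state_dict(_eager.state_dict())
    _x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(_compiled(_x), _eager(_x), atol=1e-5)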
| WingsUpete/EEG2Age | TranLayer | false | 5,985 | ["MIT"] | 1 | 8d7b9049fe4e47c701659bbbf2843600fa7c8d8d | https://github.com/WingsUpete/EEG2Age/tree/8d7b9049fe4e47c701659bbbf2843600fa7c8d8d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, embed_dim, num_nodes):
super().__init__()
self.embed_dim = embed_dim
self.num_nodes = num_nodes
self.linear_nodes = nn.Linear(in_features=self.num_nodes,
out_features=1, bias=True)
self.linear_embed = nn.Linear(in_features=self.embed_dim,
out_features=1, bias=True)
self.reset_parameters()
def reset_parameters(self):
""" Reinitialize learnable parameters. """
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear_nodes.weight, gain=gain)
nn.init.xavier_normal_(self.linear_embed.weight, gain=gain)
def forward(self, embed_feat: 'torch.Tensor'):
reshapedEmbedFeat = torch.transpose(embed_feat, -2, -1)
aggNodes = self.linear_nodes(reshapedEmbedFeat)
aggNodes = F.relu(aggNodes).reshape(-1, self.embed_dim)
del reshapedEmbedFeat
aggEmbed = self.linear_embed(aggNodes)
aggEmbed = F.relu(aggEmbed)
del aggNodes
return aggEmbed
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
TokenEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wf/cwfgpodivarq2gzz7nodlok35jybiut5djlrlgyaw23yzzih2tt7.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy]
# Source node to ATen node mapping:
# pad => copy
# Graph fragment:
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1, %slice_2), kwargs = {})
# %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %copy, 2, 1, 5), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_7, 2, 0, 1), kwargs = {})
# %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 2, 5, 6), kwargs = {})
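# Added note (illustrative, not part of the generated output): the slice_scatter
# ops above implement nn.Conv1d's padding_mode='circular' with padding=1.  For a
# single length-4 sequence [x0, x1, x2, x3] the padded length-6 buffer is
#     [x3, x0, x1, x2, x3, x0]
# positions 1..4 hold the original samples, position 0 wraps around from the end
# and position 5 wraps around from the start.  This is why buf1 below is
# allocated with shape (4, 4, 6) before the unpadded 3-tap convolution runs.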
triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 24
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 6
x2 = xindex
y1 = (yindex // 6)
tmp0 = y0
tmp1 = tl.full([1, 1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp4
tmp9 = tmp7 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp6
tmp12 = tl.load(in_ptr0 + ((-4) + x2 + (4*y0) + (16*y1)), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp13 = float("nan")
tmp14 = tl.where(tmp10, tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tmp3 >= tmp4
tmp18 = tmp3 < tmp1
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp2
tmp21 = tl.load(in_ptr0 + ((-20) + x2 + (4*y0) + (16*y1)), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp19, tmp21, tmp13)
tmp23 = tl.where(tmp5, tmp16, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x2 + (4*y0) + (16*y1)), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.where(tmp30, tmp32, tmp13)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp26, tmp33, tmp34)
tmp36 = tmp0 >= tmp4
tmp37 = tmp0 < tmp1
tmp38 = tmp36 & tmp37
tmp39 = tl.load(in_ptr0 + ((-4) + x2 + (4*y0) + (16*y1)), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp38, tmp39, tmp13)
tmp41 = tl.where(tmp26, tmp35, tmp40)
tmp42 = tl.where(tmp2, tmp25, tmp41)
tl.store(out_ptr0 + (y0 + (6*x2) + (24*y1)), tmp42, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%slice_scatter_default_2, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy]
stream0 = get_raw_stream(0)
triton_poi_fused_copy_0.run(primals_1, buf1, 24, 4, grid=grid(24, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
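# Added note: nn.Conv1d expects (batch, channels, length) input, so forward()
# permutes the (batch, length, channels) embedding input before the convolution
# and transposes the result back.  A minimal usage sketch (variable names are
# assumptions for illustration only):
#     emb = TokenEmbedding(c_in=4, d_model=4)
#     y = emb(torch.rand(4, 4, 4))   # y has shape (4, 4, 4)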
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'd_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 24
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 6
x2 = xindex
y1 = yindex // 6
tmp0 = y0
tmp1 = tl.full([1, 1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK])
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp4
tmp9 = tmp7 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp6
tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp13 = float('nan')
tmp14 = tl.where(tmp10, tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tmp3 >= tmp4
tmp18 = tmp3 < tmp1
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp2
tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp19, tmp21, tmp13)
tmp23 = tl.where(tmp5, tmp16, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.where(tmp30, tmp32, tmp13)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp26, tmp33, tmp34)
tmp36 = tmp0 >= tmp4
tmp37 = tmp0 < tmp1
tmp38 = tmp36 & tmp37
tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp38, tmp39, tmp13)
tmp41 = tl.where(tmp26, tmp35, tmp40)
tmp42 = tl.where(tmp2, tmp25, tmp41)
tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK
=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(64)](buf3, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1
class TokenEmbeddingNew(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbeddingNew, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, input_0):
primals_2 = self.tokenConv.weight
primals_3 = self.tokenConv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Xianchao-Wu/informer | TokenEmbedding | false | 5,986 | [
"Apache-2.0"
] | 1 | bb9cb3c6ff9e7e76c8dbbf3bcc7924df1f18982d | https://github.com/Xianchao-Wu/informer/tree/bb9cb3c6ff9e7e76c8dbbf3bcc7924df1f18982d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, c_in, d_model):
super().__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
InversePose | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7k/c7kwcfcuvlusmnxjznncamkapi5fn6ei5hugqjy6mymrjfmh7jkq.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, -1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 3
x2 = (xindex // 9)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2v/c2v5qbzq6y7mlijzpjbwl6jmt6sqsabsp6c5xd65ma5ymhh54bhs.py
# Topologically Sorted Source Nodes: [new_zeros, pose_inv, setitem, setitem_1, setitem_2], Original ATen: [aten.new_zeros, aten.add, aten.copy, aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# new_zeros => full
# pose_inv => add
# setitem => copy
# setitem_1 => copy_1
# setitem_2 => copy_2, full_default
# Graph fragment:
# %full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%full, 1e-06), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_6, %permute), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %copy, 2, 0, 3), kwargs = {})
# %slice_scatter_default_1 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%add, %slice_scatter_default, 1, 0, 3), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_13, %bmm), kwargs = {})
# %slice_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %copy_1, 2, 3, 4), kwargs = {})
# %slice_scatter_default_3 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_scatter_default_2, 1, 0, 3), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_3, %full_default), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int, %copy_2, 1, 3), kwargs = {})
# %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%slice_scatter_default_3, %select_scatter_default, 1, 3), kwargs = {})
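# Added note (illustrative): taken together, the two kernels and the bmm below
# assemble the closed-form inverse of a pose [R | t; 0 1] whose rotation block
# is orthonormal:
#   * buf0 holds -R^T (triton_poi_fused_mul_0 negates R, and buf0's (9, 1, 3)
#     strides realise the transpose),
#   * the bmm then computes the translation part -R^T @ t,
#   * the scatter kernel writes R^T into the top-left 3x3 block and -R^T @ t
#     into the last column, leaves the remaining entries at the eps constant
#     1e-06 and sets the bottom-right entry to 1.0, matching pose_inv in the
#     reference implementation.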
triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1 = async_compile.triton('triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = x2
tmp1 = tl.full([1, 1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = y0
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1, 1], 3, tl.int64)
tmp6 = tmp5 < tmp5
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp5
tmp9 = tmp8 & tmp6
tmp10 = tl.load(in_ptr0 + (tl.broadcast_to(3 + (3*y1), [XBLOCK, YBLOCK])), tmp9 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp6 & tmp6
tmp12 = tmp7 < tmp5
tmp13 = tmp12 & tmp11
tmp14 = tl.load(in_ptr1 + (tl.broadcast_to(3 + (4*y3), [XBLOCK, YBLOCK])), tmp13 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp15 = 1e-06
tmp16 = tl.where(tmp12, tmp14, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp6, tmp18, tmp15)
tmp20 = tl.where(tmp8, tmp10, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp6, tmp20, tmp21)
tmp23 = tmp12 & tmp6
tmp24 = tl.load(in_ptr1 + (tl.broadcast_to(3 + (4*y3), [XBLOCK, YBLOCK])), tmp23 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp12, tmp24, tmp15)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp6, tmp25, tmp26)
tmp28 = tl.where(tmp6, tmp27, tmp15)
tmp29 = tl.where(tmp6, tmp22, tmp28)
tmp30 = 1.0
tmp31 = tl.where(tmp4, tmp30, tmp29)
tmp32 = tmp0 < tmp5
tmp33 = tmp8 & tmp32
tmp34 = tl.load(in_ptr0 + (x2 + (3*y1)), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.broadcast_to(x2, [XBLOCK, YBLOCK])
tmp36 = tmp35 < tmp5
tmp37 = tmp36 & tmp32
tmp38 = tmp12 & tmp37
tmp39 = tl.load(in_ptr1 + (x2 + (4*y3)), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp12, tmp39, tmp15)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp37, tmp40, tmp41)
tmp43 = tl.where(tmp36, tmp42, tmp15)
tmp44 = tl.where(tmp8, tmp34, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp32, tmp44, tmp45)
tmp47 = tmp12 & tmp32
tmp48 = tl.load(in_ptr1 + (x2 + (4*y3)), tmp47 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp49 = tl.where(tmp12, tmp48, tmp15)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp32, tmp49, tmp50)
tmp52 = tl.where(tmp32, tmp51, tmp15)
tmp53 = tl.where(tmp32, tmp46, tmp52)
tmp54 = tl.where(tmp2, tmp31, tmp53)
tl.store(out_ptr0 + (y0 + (4*x2) + (16*y1)), tmp54, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 3), (9, 1, 3), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 36, grid=grid(36), stream=stream0)
buf1 = empty_strided_cuda((4, 3, 1), (3, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, matmul], Original ATen: [aten.mul, aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(arg0_1, (4, 3, 1), (16, 4, 1), 3), out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [new_zeros, pose_inv, setitem, setitem_1, setitem_2], Original ATen: [aten.new_zeros, aten.add, aten.copy, aten.lift_fresh, aten.fill]
triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1.run(buf1, arg0_1, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
del arg0_1
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def inverse_pose(pose, eps=1e-06):
"""Function that inverts a 4x4 pose.
Args:
        pose (Tensor): tensor with poses.
Returns:
Tensor: tensor with inverted poses.
Shape:
- Input: :math:`(N, 4, 4)`
- Output: :math:`(N, 4, 4)`
Example:
>>> pose = torch.rand(1, 4, 4) # Nx4x4
>>> pose_inv = tgm.inverse_pose(pose) # Nx4x4
"""
if not torch.is_tensor(pose):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(pose)))
    if not (len(pose.shape) == 3 and pose.shape[-2:] == (4, 4)):
raise ValueError('Input size must be a Nx4x4 tensor. Got {}'.format
(pose.shape))
r_mat = pose[..., :3, 0:3]
t_vec = pose[..., :3, 3:4]
r_mat_trans = torch.transpose(r_mat, 1, 2)
pose_inv = pose.new_zeros(pose.shape) + eps
pose_inv[..., :3, 0:3] = r_mat_trans
pose_inv[..., :3, 3:4] = torch.matmul(-1.0 * r_mat_trans, t_vec)
pose_inv[..., 3, 3] = 1.0
return pose_inv
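# Added note: for a pose P = [[R, t], [0, 1]] with orthonormal R, the code above
# is the closed-form inverse
#     P^{-1} = [[R^T, -R^T t], [0, 1]]
# up to the eps=1e-06 that is first added to every entry of the output buffer.
# A small self-contained check; the helper name is an assumption added here for
# illustration and is not part of the original module:
def _check_inverse_pose_on_identity():
    eye = torch.eye(4).unsqueeze(0)
    inv = inverse_pose(eye)
    return torch.allclose(inv, eye, atol=1e-05)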
class InversePose(nn.Module):
"""Creates a transformation that inverts a 4x4 pose.
Args:
        pose (Tensor): tensor with poses.
Returns:
Tensor: tensor with inverted poses.
Shape:
- Input: :math:`(N, 4, 4)`
- Output: :math:`(N, 4, 4)`
Example:
>>> pose = torch.rand(1, 4, 4) # Nx4x4
>>> transform = tgm.InversePose()
>>> pose_inv = transform(pose) # Nx4x4
"""
def __init__(self):
super(InversePose, self).__init__()
def forward(self, input):
return inverse_pose(input)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 3
x2 = xindex // 9
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1(in_ptr0, in_ptr1,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = x2
tmp1 = tl.full([1, 1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = y0
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1, 1], 3, tl.int64)
tmp6 = tmp5 < tmp5
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp5
tmp9 = tmp8 & tmp6
tmp10 = tl.load(in_ptr0 + tl.broadcast_to(3 + 3 * y1, [XBLOCK, YBLOCK]),
tmp9 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp6 & tmp6
tmp12 = tmp7 < tmp5
tmp13 = tmp12 & tmp11
tmp14 = tl.load(in_ptr1 + tl.broadcast_to(3 + 4 * y3, [XBLOCK, YBLOCK]),
tmp13 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp15 = 1e-06
tmp16 = tl.where(tmp12, tmp14, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp6, tmp18, tmp15)
tmp20 = tl.where(tmp8, tmp10, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp6, tmp20, tmp21)
tmp23 = tmp12 & tmp6
tmp24 = tl.load(in_ptr1 + tl.broadcast_to(3 + 4 * y3, [XBLOCK, YBLOCK]),
tmp23 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp12, tmp24, tmp15)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp6, tmp25, tmp26)
tmp28 = tl.where(tmp6, tmp27, tmp15)
tmp29 = tl.where(tmp6, tmp22, tmp28)
tmp30 = 1.0
tmp31 = tl.where(tmp4, tmp30, tmp29)
tmp32 = tmp0 < tmp5
tmp33 = tmp8 & tmp32
tmp34 = tl.load(in_ptr0 + (x2 + 3 * y1), tmp33 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.broadcast_to(x2, [XBLOCK, YBLOCK])
tmp36 = tmp35 < tmp5
tmp37 = tmp36 & tmp32
tmp38 = tmp12 & tmp37
tmp39 = tl.load(in_ptr1 + (x2 + 4 * y3), tmp38 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp12, tmp39, tmp15)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp37, tmp40, tmp41)
tmp43 = tl.where(tmp36, tmp42, tmp15)
tmp44 = tl.where(tmp8, tmp34, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp32, tmp44, tmp45)
tmp47 = tmp12 & tmp32
tmp48 = tl.load(in_ptr1 + (x2 + 4 * y3), tmp47 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp49 = tl.where(tmp12, tmp48, tmp15)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp32, tmp49, tmp50)
tmp52 = tl.where(tmp32, tmp51, tmp15)
tmp53 = tl.where(tmp32, tmp46, tmp52)
tmp54 = tl.where(tmp2, tmp31, tmp53)
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp54, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 3), (9, 1, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(36)](arg0_1, buf0, 36, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 3, 1), (3, 1, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(arg0_1, (4, 3, 1), (16,
4, 1), 3), out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_copy_fill_lift_fresh_new_zeros_1[grid(16, 4)](buf1
, arg0_1, buf2, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1,
num_stages=1)
del arg0_1
del buf1
return buf2,
def inverse_pose(pose, eps=1e-06):
"""Function that inverts a 4x4 pose.
Args:
        pose (Tensor): tensor with poses.
Returns:
Tensor: tensor with inverted poses.
Shape:
- Input: :math:`(N, 4, 4)`
- Output: :math:`(N, 4, 4)`
Example:
>>> pose = torch.rand(1, 4, 4) # Nx4x4
>>> pose_inv = tgm.inverse_pose(pose) # Nx4x4
"""
if not torch.is_tensor(pose):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(pose)))
    if not (len(pose.shape) == 3 and pose.shape[-2:] == (4, 4)):
raise ValueError('Input size must be a Nx4x4 tensor. Got {}'.format
(pose.shape))
r_mat = pose[..., :3, 0:3]
t_vec = pose[..., :3, 3:4]
r_mat_trans = torch.transpose(r_mat, 1, 2)
pose_inv = pose.new_zeros(pose.shape) + eps
pose_inv[..., :3, 0:3] = r_mat_trans
pose_inv[..., :3, 3:4] = torch.matmul(-1.0 * r_mat_trans, t_vec)
pose_inv[..., 3, 3] = 1.0
return pose_inv
class InversePoseNew(nn.Module):
"""Creates a transformation that inverts a 4x4 pose.
Args:
        pose (Tensor): tensor with poses.
Returns:
Tensor: tensor with inverted poses.
Shape:
- Input: :math:`(N, 4, 4)`
- Output: :math:`(N, 4, 4)`
Example:
>>> pose = torch.rand(1, 4, 4) # Nx4x4
>>> transform = tgm.InversePose()
>>> pose_inv = transform(pose) # Nx4x4
"""
def __init__(self):
super(InversePoseNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Wizaron/torchgeometry | InversePose | false | 5,987 | [
"Apache-2.0"
] | 1 | 59a8d25dd811ded6a139d5c0c2442b06f43dc775 | https://github.com/Wizaron/torchgeometry/tree/59a8d25dd811ded6a139d5c0c2442b06f43dc775 | import torch
import torch.nn as nn
def inverse_pose(pose, eps=1e-06):
"""Function that inverts a 4x4 pose.
Args:
        pose (Tensor): tensor with poses.
Returns:
Tensor: tensor with inverted poses.
Shape:
- Input: :math:`(N, 4, 4)`
- Output: :math:`(N, 4, 4)`
Example:
>>> pose = torch.rand(1, 4, 4) # Nx4x4
>>> pose_inv = tgm.inverse_pose(pose) # Nx4x4
"""
if not torch.is_tensor(pose):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(pose)))
    if not (len(pose.shape) == 3 and pose.shape[-2:] == (4, 4)):
raise ValueError('Input size must be a Nx4x4 tensor. Got {}'.format
(pose.shape))
r_mat = pose[..., :3, 0:3]
t_vec = pose[..., :3, 3:4]
r_mat_trans = torch.transpose(r_mat, 1, 2)
pose_inv = pose.new_zeros(pose.shape) + eps
pose_inv[..., :3, 0:3] = r_mat_trans
pose_inv[..., :3, 3:4] = torch.matmul(-1.0 * r_mat_trans, t_vec)
pose_inv[..., 3, 3] = 1.0
return pose_inv
class Model(nn.Module):
"""Creates a transformation that inverts a 4x4 pose.
Args:
        pose (Tensor): tensor with poses.
Returns:
Tensor: tensor with inverted poses.
Shape:
- Input: :math:`(N, 4, 4)`
- Output: :math:`(N, 4, 4)`
Example:
>>> pose = torch.rand(1, 4, 4) # Nx4x4
>>> transform = tgm.InversePose()
>>> pose_inv = transform(pose) # Nx4x4
"""
def __init__(self):
super().__init__()
def forward(self, input):
return inverse_pose(input)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
BalancedL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qi/cqika2uwev5ndztshbfb45qz54a3joa3nw5e33kmmgggt6wenpt3.py
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, add, mul_1, mul_2, truediv, add_1, log, mul_3, mul_4, sub_1, mul_5, add_2, sub_2, loss, sum_1, truediv_1, loss_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.add, aten.div, aten.log, aten.where, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# diff => abs_1
# log => log
# loss => where
# loss_1 => mul_6
# lt => lt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=5] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 19.085536923187664), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.02619784824562798), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 19.085536923187664), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 1.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 0.07859354473688394), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %sub_1, %sub_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 16.0001), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 1.0), kwargs = {})
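# Added note (not part of the generated output): the literal constants inside the
# fused kernel below are the balanced-L1 coefficients from the reference code,
# pre-evaluated for the defaults alpha=0.5, gamma=1.5, beta=1.0:
#   b = e**(gamma/alpha) - 1 = e**3 - 1   ~= 19.085536923187664
#   alpha / b                             ~= 0.02619784824562798
#   gamma / b                             ~= 0.07859354473688394
#   alpha * beta                           = 0.5
#   1 / (num + 0.0001) = 1 / 16.0001      ~= 0.062499609377441395
# where num = pred.size(0) * pred.size(1) = 4 * 4 is the normaliser applied in
# BalancedL1Loss.forward for the (4, 4, 4, 4) sample inputs.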
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 19.085536923187664
tmp7 = tmp3 * tmp6
tmp8 = tmp7 + tmp4
tmp9 = 0.02619784824562798
tmp10 = tmp8 * tmp9
tmp11 = tmp7 * tmp4
tmp12 = tmp11 + tmp4
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = 0.5
tmp16 = tmp3 * tmp15
tmp17 = tmp14 - tmp16
tmp18 = 1.5
tmp19 = tmp3 * tmp18
tmp20 = 0.07859354473688394
tmp21 = tmp19 + tmp20
tmp22 = tmp21 - tmp15
tmp23 = tl.where(tmp5, tmp17, tmp22)
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 0.062499609377441395
tmp28 = tmp26 * tmp27
tmp29 = tmp28 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, add, mul_1, mul_2, truediv, add_1, log, mul_3, mul_4, sub_1, mul_5, add_2, sub_2, loss, sum_1, truediv_1, loss_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.add, aten.div, aten.log, aten.where, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.onnx
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='none'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
if reduction == 'none':
loss = loss
elif reduction == 'sum':
loss = loss.sum()
elif reduction == 'mean':
loss = loss.mean()
else:
raise NotImplementedError
return loss
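# Added note: balanced_l1_loss above is the element-wise piecewise function from
# "Libra R-CNN" (arXiv:1904.02701),
#     L(d) = (alpha/b) * (b*d + 1) * ln(b*d/beta + 1) - alpha*d    for d <  beta
#     L(d) = gamma*d + gamma/b - alpha*beta                        for d >= beta
# with b = e**(gamma/alpha) - 1, which makes the two branches meet at d = beta.
# A small self-contained sanity check of that continuity; the helper name is an
# assumption added here for illustration and is not part of the original module:
def _check_balanced_l1_continuity(beta=1.0, alpha=0.5, gamma=1.5):
    target = torch.zeros(1)
    below = balanced_l1_loss(torch.full((1,), beta - 1e-06), target, beta,
        alpha, gamma)
    above = balanced_l1_loss(torch.full((1,), beta + 1e-06), target, beta,
        alpha, gamma)
    return torch.allclose(below, above, atol=0.0001)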
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, loss_weight=1.0, beta=1.0, alpha=0.5, gamma=1.5,
reduction='none'):
super(BalancedL1Loss, self).__init__()
self.beta = beta
self.alpha = alpha
self.gamma = gamma
self.weight = loss_weight
self.reduction = reduction
        assert reduction == 'none', 'only none reduction is supported!'
def forward(self, pred, gt, mask=None):
num = pred.size(0) * pred.size(1)
if mask is not None:
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(pred).float()
pred = pred * mask
gt = gt * mask
loss = balanced_l1_loss(pred, gt, beta=self.beta, alpha=self.alpha,
gamma=self.gamma, reduction=self.reduction)
loss = loss.sum() / (num + 0.0001) * self.weight
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 19.085536923187664
tmp7 = tmp3 * tmp6
tmp8 = tmp7 + tmp4
tmp9 = 0.02619784824562798
tmp10 = tmp8 * tmp9
tmp11 = tmp7 * tmp4
tmp12 = tmp11 + tmp4
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = 0.5
tmp16 = tmp3 * tmp15
tmp17 = tmp14 - tmp16
tmp18 = 1.5
tmp19 = tmp3 * tmp18
tmp20 = 0.07859354473688394
tmp21 = tmp19 + tmp20
tmp22 = tmp21 - tmp15
tmp23 = tl.where(tmp5, tmp17, tmp22)
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 0.062499609377441395
tmp28 = tmp26 * tmp27
tmp29 = tmp28 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='none'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
if reduction == 'none':
loss = loss
elif reduction == 'sum':
loss = loss.sum()
elif reduction == 'mean':
loss = loss.mean()
else:
raise NotImplementedError
return loss
class BalancedL1LossNew(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, loss_weight=1.0, beta=1.0, alpha=0.5, gamma=1.5,
reduction='none'):
super(BalancedL1LossNew, self).__init__()
self.beta = beta
self.alpha = alpha
self.gamma = gamma
self.weight = loss_weight
self.reduction = reduction
        assert reduction == 'none', 'only none reduction is supported!'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Xiangzhaohong/LidarNet | BalancedL1Loss | false | 5,988 | [
"Apache-2.0"
] | 1 | 42d025a7b629e387c9b9b01ead3558a8da81a3b0 | https://github.com/Xiangzhaohong/LidarNet/tree/42d025a7b629e387c9b9b01ead3558a8da81a3b0 | import torch
import numpy as np
import torch.nn as nn
import torch.onnx
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='none'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
if reduction == 'none':
loss = loss
elif reduction == 'sum':
loss = loss.sum()
elif reduction == 'mean':
loss = loss.mean()
else:
raise NotImplementedError
return loss
class Model(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, loss_weight=1.0, beta=1.0, alpha=0.5, gamma=1.5,
reduction='none'):
super().__init__()
self.beta = beta
self.alpha = alpha
self.gamma = gamma
self.weight = loss_weight
self.reduction = reduction
        assert reduction == 'none', 'only none reduction is supported!'
def forward(self, pred, gt, mask=None):
num = pred.size(0) * pred.size(1)
if mask is not None:
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(pred).float()
pred = pred * mask
gt = gt * mask
loss = balanced_l1_loss(pred, gt, beta=self.beta, alpha=self.alpha,
gamma=self.gamma, reduction=self.reduction)
loss = loss.sum() / (num + 0.0001) * self.weight
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
triplet_my_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3k/c3kek72pwrcrumjnefj2rvdl74nxdxgux5tdgy2ov2z6zasezvm7.py
# Topologically Sorted Source Nodes: [mul, add, x, mul_1, add_1, x_1, mse_loss, s1, mul_3, mul_2, add_2, x_2, mse_loss_1, s2, pow_1, loss], Original ATen: [aten.mul, aten.add, aten.div, aten.mse_loss, aten.sum, aten.pow]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# loss => mul_4
# mse_loss => mean, pow_7, sub
# mse_loss_1 => mean_1, pow_8, sub_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_9
# s1 => sum_4
# s2 => sum_5
# x => div
# x_1 => div_1
# x_2 => div_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-12), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 1e-12), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_7,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 1.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_2, 1e-12), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, %add_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_2), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_8,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean_1,), kwargs = {})
# %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, -1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %pow_9), kwargs = {})
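# Added note (illustrative): reading the graph fragments above, the fused kernel
# below L2-normalises the first three (4, 4, 4) slices of the input along the
# last dimension (each element is divided by the norm of its group of 4 values
# plus 1e-12), computes the mean-squared error between slice 0 and slice 1 (s1)
# and between slice 0 and slice 2 (s2), and returns the ratio s1 / s2, i.e. the
# mul_4 node evaluates s1 * s2**(-1).  The 64.0 divisor inside the kernel is the
# element count of each (4, 4, 4) slice used by the MSE mean.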
triton_per_fused_add_div_mse_loss_mul_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 15, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp3 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (64 + r2), None)
tmp20 = tl.load(in_ptr0 + (64 + (4*r1)), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (65 + (4*r1)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (66 + (4*r1)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (67 + (4*r1)), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (128 + r2), None)
tmp41 = tl.load(in_ptr0 + (128 + (4*r1)), None, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (129 + (4*r1)), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (130 + (4*r1)), None, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (131 + (4*r1)), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tmp19 = tmp18 * tmp1
tmp21 = tmp20 * tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp29 = tmp28 * tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.sqrt(tmp30)
tmp32 = tmp31 + tmp15
tmp33 = tmp19 / tmp32
tmp34 = tmp17 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tl.broadcast_to(tmp35, [XBLOCK, RBLOCK])
tmp38 = tl.sum(tmp36, 1)[:, None]
tmp40 = tmp39 * tmp1
tmp42 = tmp41 * tmp41
tmp44 = tmp43 * tmp43
tmp45 = tmp42 + tmp44
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp50 = tmp49 * tmp49
tmp51 = tmp48 + tmp50
tmp52 = libdevice.sqrt(tmp51)
tmp53 = tmp52 + tmp15
tmp54 = tmp40 / tmp53
tmp55 = tmp17 - tmp54
tmp56 = tmp55 * tmp55
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 64.0
tmp61 = tmp38 / tmp60
tmp62 = tmp61 * tmp1
tmp63 = tmp59 / tmp60
tmp64 = tl.full([1, 1], 1, tl.int32)
tmp65 = tmp64 / tmp63
tmp66 = tmp62 * tmp65
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp66, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, add, x, mul_1, add_1, x_1, mse_loss, s1, mul_3, mul_2, add_2, x_2, mse_loss_1, s2, pow_1, loss], Original ATen: [aten.mul, aten.add, aten.div, aten.mse_loss, aten.sum, aten.pow]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_pow_sum_0.run(buf3, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1.0 * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
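# Quick numeric check of normalize (added illustrative comment, not in the original source):
#   normalize(torch.tensor([[3.0, 4.0]]))  # -> tensor([[0.6000, 0.8000]]), since ||[3., 4.]|| = 5
# The 1e-12 term only guards against division by zero for all-zero rows.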
class triplet_my_loss(nn.Module):
def __init__(self, margin=1.0, use_gpu=True):
super(triplet_my_loss, self).__init__()
self.use_gpu = use_gpu
self.margin = margin
self.mse = nn.MSELoss()
def forward(self, inputs):
"""
Args:
            inputs: a sequence of three feature tensors [anchor, positive, negative],
                all with the same shape
"""
p = inputs[0]
p1 = inputs[1]
n1 = inputs[2]
anchor = normalize(p, axis=-1)
positive = normalize(p1, axis=-1)
negative = normalize(n1, axis=-1)
s1 = torch.sum(self.mse(anchor, positive))
s2 = torch.sum(self.mse(anchor, negative))
loss = torch.mul(torch.mul(s1, self.margin), torch.pow(s2, -1))
return loss
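# Note on the loss above (added commentary): s1 is the mean squared distance between the
# L2-normalized anchor and positive, s2 the same for anchor and negative, so
# loss = margin * s1 / s2 shrinks as positives move closer and negatives move further away.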
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_pow_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (64 + r2), None)
tmp20 = tl.load(in_ptr0 + (64 + 4 * r1), None, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (65 + 4 * r1), None, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (66 + 4 * r1), None, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr0 + (67 + 4 * r1), None, eviction_policy='evict_last'
)
tmp39 = tl.load(in_ptr0 + (128 + r2), None)
tmp41 = tl.load(in_ptr0 + (128 + 4 * r1), None, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr0 + (129 + 4 * r1), None, eviction_policy=
'evict_last')
tmp46 = tl.load(in_ptr0 + (130 + 4 * r1), None, eviction_policy=
'evict_last')
tmp49 = tl.load(in_ptr0 + (131 + 4 * r1), None, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tmp19 = tmp18 * tmp1
tmp21 = tmp20 * tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp29 = tmp28 * tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.sqrt(tmp30)
tmp32 = tmp31 + tmp15
tmp33 = tmp19 / tmp32
tmp34 = tmp17 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tl.broadcast_to(tmp35, [XBLOCK, RBLOCK])
tmp38 = tl.sum(tmp36, 1)[:, None]
tmp40 = tmp39 * tmp1
tmp42 = tmp41 * tmp41
tmp44 = tmp43 * tmp43
tmp45 = tmp42 + tmp44
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp50 = tmp49 * tmp49
tmp51 = tmp48 + tmp50
tmp52 = libdevice.sqrt(tmp51)
tmp53 = tmp52 + tmp15
tmp54 = tmp40 / tmp53
tmp55 = tmp17 - tmp54
tmp56 = tmp55 * tmp55
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 64.0
tmp61 = tmp38 / tmp60
tmp62 = tmp61 * tmp1
tmp63 = tmp59 / tmp60
tmp64 = tl.full([1, 1], 1, tl.int32)
tmp65 = tmp64 / tmp63
tmp66 = tmp62 * tmp65
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp66, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_pow_sum_0[grid(1)](buf3,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf3,
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1.0 * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
class triplet_my_lossNew(nn.Module):
def __init__(self, margin=1.0, use_gpu=True):
super(triplet_my_lossNew, self).__init__()
self.use_gpu = use_gpu
self.margin = margin
self.mse = nn.MSELoss()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Xavierxhq/fruit_identification | triplet_my_loss | false | 5,989 | [
"MIT"
] | 1 | 54cdf2c3e0aad26ae98b081e44ad1655b6f0a758 | https://github.com/Xavierxhq/fruit_identification/tree/54cdf2c3e0aad26ae98b081e44ad1655b6f0a758 | import torch
from torch import nn
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1.0 * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
class Model(nn.Module):
def __init__(self, margin=1.0, use_gpu=True):
super().__init__()
self.use_gpu = use_gpu
self.margin = margin
self.mse = nn.MSELoss()
def forward(self, inputs):
"""
Args:
            inputs: a sequence of three feature tensors [anchor, positive, negative],
                all with the same shape
"""
p = inputs[0]
p1 = inputs[1]
n1 = inputs[2]
anchor = normalize(p, axis=-1)
positive = normalize(p1, axis=-1)
negative = normalize(n1, axis=-1)
s1 = torch.sum(self.mse(anchor, positive))
s2 = torch.sum(self.mse(anchor, negative))
loss = torch.mul(torch.mul(s1, self.margin), torch.pow(s2, -1))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LinearExcitability | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bh/cbhpcgxjg3mwo4dulstw5ie26none2yzi5sysdzl34cu6pyah4fg.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.view]
# Source node to ATen node mapping:
# output_1 => add, view_3
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_2), kwargs = {})
# %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {})
triton_poi_fused_add_view_0 = async_compile.triton('triton_poi_fused_add_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
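# Reading of the kernel above (added commentary): in_out_ptr0 holds the result of the mm with the
# transposed weight, and in_ptr0 holds the bias; x0 = xindex % 4 selects the output feature, so the
# kernel simply adds the bias in place, broadcast over all leading dimensions.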
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_add_view_0.run(buf2, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
def linearExcitability(input, weight, excitability=None, bias=None):
"""Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.
Shape:
- input: :math:`(N, *, in_features)`
- weight: :math:`(out_features, in_features)`
- excitability: :math:`(out_features)`
- bias: :math:`(out_features)`
- output: :math:`(N, *, out_features)`
(NOTE: `*` means any number of additional dimensions)"""
if excitability is not None:
output = input.matmul(weight.t()) * excitability
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
return output
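# Shape walk-through for linearExcitability (added illustrative comment): with input of shape
# (N, in_features) = (2, 4) and weight of shape (out_features, in_features) = (3, 4),
# input.matmul(weight.t()) is (2, 3); excitability (3,) and bias (3,) then broadcast over the
# batch dimension, so the output stays (2, 3).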
class LinearExcitability(nn.Module):
"""Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)
Shape:
- input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
- output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
excitability: the learnable multiplication terms (out_features)
bias: the learnable bias of the module of shape (out_features)
excit_buffer: fixed multiplication variable (out_features)"""
def __init__(self, in_features, out_features, bias=True, excitability=
False, excit_buffer=False):
super(LinearExcitability, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if excitability:
self.excitability = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('excitability', None)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if excit_buffer:
buffer = torch.Tensor(out_features).uniform_(1, 1)
self.register_buffer('excit_buffer', buffer)
else:
self.register_buffer('excit_buffer', None)
self.reset_parameters()
def reset_parameters(self):
"""Modifies the parameters "in-place" to initialize / reset them at appropriate values."""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.excitability is not None:
self.excitability.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
"""Running this model's forward step requires/returns:
-[input]: [batch_size]x[...]x[in_features]
        -[output]: [batch_size]x[...]x[out_features]"""
if self.excit_buffer is None:
excitability = self.excitability
elif self.excitability is None:
excitability = self.excit_buffer
else:
excitability = self.excitability * self.excit_buffer
return linearExcitability(input, self.weight, excitability, self.bias)
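    # Added commentary: when both a learnable excitability parameter and an excit_buffer are
    # registered, the effective gain is their elementwise product, so the buffer acts as a fixed
    # per-unit scaling on top of the learned one; with neither present, excitability is None and
    # linearExcitability skips the multiplication entirely.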
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_view_0[grid(256)](buf2, primals_2, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def linearExcitability(input, weight, excitability=None, bias=None):
"""Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.
Shape:
- input: :math:`(N, *, in_features)`
- weight: :math:`(out_features, in_features)`
- excitability: :math:`(out_features)`
- bias: :math:`(out_features)`
- output: :math:`(N, *, out_features)`
(NOTE: `*` means any number of additional dimensions)"""
if excitability is not None:
output = input.matmul(weight.t()) * excitability
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
return output
class LinearExcitabilityNew(nn.Module):
"""Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)
Shape:
- input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
- output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
excitability: the learnable multiplication terms (out_features)
bias: the learnable bias of the module of shape (out_features)
excit_buffer: fixed multiplication variable (out_features)"""
def __init__(self, in_features, out_features, bias=True, excitability=
False, excit_buffer=False):
super(LinearExcitabilityNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if excitability:
self.excitability = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('excitability', None)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if excit_buffer:
buffer = torch.Tensor(out_features).uniform_(1, 1)
self.register_buffer('excit_buffer', buffer)
else:
self.register_buffer('excit_buffer', None)
self.reset_parameters()
def reset_parameters(self):
"""Modifies the parameters "in-place" to initialize / reset them at appropriate values."""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.excitability is not None:
self.excitability.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features) + ')'
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| XSMUBC/DNC-lifelong-learning | LinearExcitability | false | 5,990 | [
"MIT"
] | 1 | 55b40bad65eb3cb68c50411acf8f770bfc52e3d9 | https://github.com/XSMUBC/DNC-lifelong-learning/tree/55b40bad65eb3cb68c50411acf8f770bfc52e3d9 | import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
def linearExcitability(input, weight, excitability=None, bias=None):
"""Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.
Shape:
- input: :math:`(N, *, in_features)`
- weight: :math:`(out_features, in_features)`
- excitability: :math:`(out_features)`
- bias: :math:`(out_features)`
- output: :math:`(N, *, out_features)`
(NOTE: `*` means any number of additional dimensions)"""
if excitability is not None:
output = input.matmul(weight.t()) * excitability
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
return output
class Model(nn.Module):
"""Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)
Shape:
- input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
- output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
excitability: the learnable multiplication terms (out_features)
bias: the learnable bias of the module of shape (out_features)
excit_buffer: fixed multiplication variable (out_features)"""
def __init__(self, in_features, out_features, bias=True, excitability=
False, excit_buffer=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if excitability:
self.excitability = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('excitability', None)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if excit_buffer:
buffer = torch.Tensor(out_features).uniform_(1, 1)
self.register_buffer('excit_buffer', buffer)
else:
self.register_buffer('excit_buffer', None)
self.reset_parameters()
def reset_parameters(self):
"""Modifies the parameters "in-place" to initialize / reset them at appropriate values."""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.excitability is not None:
self.excitability.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
"""Running this model's forward step requires/returns:
-[input]: [batch_size]x[...]x[in_features]
        -[output]: [batch_size]x[...]x[out_features]"""
if self.excit_buffer is None:
excitability = self.excitability
elif self.excitability is None:
excitability = self.excit_buffer
else:
excitability = self.excitability * self.excit_buffer
return linearExcitability(input, self.weight, excitability, self.bias)
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
TemporalEmbedding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/aj/cajffqesecvkyqpb4xsm3itr6xvdpqs6wg4viw32uiltwyyv44is.py
# Topologically Sorted Source Nodes: [embedding, embedding_1, add, embedding_2, add_1, embedding_3, add_2, add_3], Original ATen: [aten.embedding, aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# embedding => embedding
# embedding_1 => embedding_1
# embedding_2 => embedding_2
# embedding_3 => embedding_3
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg1_1, %select), kwargs = {})
# %embedding_1 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg2_1, %select_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%embedding, %embedding_1), kwargs = {})
# %embedding_2 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg3_1, %select_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %embedding_2), kwargs = {})
# %embedding_3 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg4_1, %select_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %embedding_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 0.0), kwargs = {})
triton_poi_fused_add_embedding_0 = async_compile.triton('triton_poi_fused_add_embedding_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 24, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 24)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 24")
tmp7 = tl.load(in_ptr1 + (x0 + (4*tmp5)), xmask)
tmp9 = tmp8.to(tl.int64)
tmp10 = tl.full([XBLOCK], 7, tl.int32)
tmp11 = tmp9 + tmp10
tmp12 = tmp9 < 0
tmp13 = tl.where(tmp12, tmp11, tmp9)
tl.device_assert(((0 <= tmp13) & (tmp13 < 7)) | ~(xmask), "index out of bounds: 0 <= tmp13 < 7")
tmp15 = tl.load(in_ptr2 + (x0 + (4*tmp13)), xmask)
tmp16 = tmp7 + tmp15
tmp18 = tmp17.to(tl.int64)
tmp19 = tl.full([XBLOCK], 32, tl.int32)
tmp20 = tmp18 + tmp19
tmp21 = tmp18 < 0
tmp22 = tl.where(tmp21, tmp20, tmp18)
tl.device_assert(((0 <= tmp22) & (tmp22 < 32)) | ~(xmask), "index out of bounds: 0 <= tmp22 < 32")
tmp24 = tl.load(in_ptr3 + (x0 + (4*tmp22)), xmask)
tmp25 = tmp16 + tmp24
tmp27 = tmp26.to(tl.int64)
tmp28 = tl.full([XBLOCK], 13, tl.int32)
tmp29 = tmp27 + tmp28
tmp30 = tmp27 < 0
tmp31 = tl.where(tmp30, tmp29, tmp27)
tl.device_assert(((0 <= tmp31) & (tmp31 < 13)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 13")
tmp33 = tl.load(in_ptr4 + (x0 + (4*tmp31)), xmask)
tmp34 = tmp25 + tmp33
tmp35 = 0.0
tmp36 = tmp34 + tmp35
tl.store(out_ptr0 + (x4), tmp36, xmask)
''', device_str='cuda')
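# Reading of the kernel above (added commentary): the four loads from in_ptr0 pick the hour,
# weekday, day and month features of each position, cast them to int64, wrap negative indices by
# the corresponding table size (24, 7, 32, 13), assert they are in range, gather the matching
# embedding rows from in_ptr1..in_ptr4 and sum them; the trailing "+ 0.0" stands in for the
# absent minute embedding when freq != 't'.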
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (24, 4), (4, 1))
assert_size_stride(arg2_1, (7, 4), (4, 1))
assert_size_stride(arg3_1, (32, 4), (4, 1))
assert_size_stride(arg4_1, (13, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, embedding_1, add, embedding_2, add_1, embedding_3, add_2, add_3], Original ATen: [aten.embedding, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_embedding_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((7, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((13, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
w.require_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
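# Added commentary on FixedEmbedding: the table w follows the standard sinusoidal
# positional-encoding recipe, w[pos, 2i] = sin(pos * exp(-2i * ln(10000) / d_model)) and
# w[pos, 2i+1] = cos(pos * exp(-2i * ln(10000) / d_model)), registered as a frozen
# nn.Embedding weight; .detach() keeps the lookups out of the autograd graph.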
class TemporalEmbedding(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbedding, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
return hour_x + weekday_x + day_x + month_x + minute_x
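# Added commentary: the last axis of x is read as calendar features in the order
# [month, day, weekday, hour, minute]; the minute lookup only happens when the module was built
# with freq='t' (otherwise minute_x stays the Python float 0.0), and the embeddings are summed
# into a single d_model-sized vector per position.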
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 24, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 24) | ~xmask,
'index out of bounds: 0 <= tmp5 < 24')
tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp5), xmask)
tmp9 = tmp8.to(tl.int64)
tmp10 = tl.full([XBLOCK], 7, tl.int32)
tmp11 = tmp9 + tmp10
tmp12 = tmp9 < 0
tmp13 = tl.where(tmp12, tmp11, tmp9)
tl.device_assert((0 <= tmp13) & (tmp13 < 7) | ~xmask,
'index out of bounds: 0 <= tmp13 < 7')
tmp15 = tl.load(in_ptr2 + (x0 + 4 * tmp13), xmask)
tmp16 = tmp7 + tmp15
tmp18 = tmp17.to(tl.int64)
tmp19 = tl.full([XBLOCK], 32, tl.int32)
tmp20 = tmp18 + tmp19
tmp21 = tmp18 < 0
tmp22 = tl.where(tmp21, tmp20, tmp18)
tl.device_assert((0 <= tmp22) & (tmp22 < 32) | ~xmask,
'index out of bounds: 0 <= tmp22 < 32')
tmp24 = tl.load(in_ptr3 + (x0 + 4 * tmp22), xmask)
tmp25 = tmp16 + tmp24
tmp27 = tmp26.to(tl.int64)
tmp28 = tl.full([XBLOCK], 13, tl.int32)
tmp29 = tmp27 + tmp28
tmp30 = tmp27 < 0
tmp31 = tl.where(tmp30, tmp29, tmp27)
tl.device_assert((0 <= tmp31) & (tmp31 < 13) | ~xmask,
'index out of bounds: 0 <= tmp31 < 13')
tmp33 = tl.load(in_ptr4 + (x0 + 4 * tmp31), xmask)
tmp34 = tmp25 + tmp33
tmp35 = 0.0
tmp36 = tmp34 + tmp35
tl.store(out_ptr0 + x4, tmp36, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (24, 4), (4, 1))
assert_size_stride(arg2_1, (7, 4), (4, 1))
assert_size_stride(arg3_1, (32, 4), (4, 1))
assert_size_stride(arg4_1, (13, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_embedding_0[grid(256)](arg0_1, arg1_1, arg2_1,
arg3_1, arg4_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return buf0,
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
w.require_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class TemporalEmbeddingNew(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbeddingNew, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, input_0):
arg1_1 = self.hour_embed.emb.weight
arg2_1 = self.weekday_embed.emb.weight
arg3_1 = self.day_embed.emb.weight
arg4_1 = self.month_embed.emb.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
| Xianchao-Wu/informer | TemporalEmbedding | false | 5,991 | [
"Apache-2.0"
] | 1 | bb9cb3c6ff9e7e76c8dbbf3bcc7924df1f18982d | https://github.com/Xianchao-Wu/informer/tree/bb9cb3c6ff9e7e76c8dbbf3bcc7924df1f18982d | import math
import torch
import torch.nn as nn
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super().__init__()
w = torch.zeros(c_in, d_model).float()
w.require_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class Model(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super().__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
return hour_x + weekday_x + day_x + month_x + minute_x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
StraightThroughEstimator | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/te/cte662dz5uwvexkyn4i5k7zcpexn4oecbldxbr6yqmrjc6gzepr4.py
# Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy]
# Source node to ATen node mapping:
# float_1 => convert_element_type
# gt => gt
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
triton_poi_fused__to_copy_gt_0 = async_compile.triton('triton_poi_fused__to_copy_gt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_gt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn.functional as F
from torch import nn
import torch.jit
def straight_through_estimator(input: 'torch.Tensor') ->torch.Tensor:
""" straight through estimator
>>> straight_through_estimator(torch.randn(3, 3))
tensor([[0., 1., 0.],
[0., 1., 1.],
[0., 0., 1.]])
"""
return _STE.apply(input)
class _STE(Function):
""" Straight Through Estimator
"""
@staticmethod
def forward(ctx, input: 'torch.Tensor') ->torch.Tensor:
return (input > 0).float()
@staticmethod
def backward(ctx, grad_output: 'torch.Tensor') ->torch.Tensor:
return F.hardtanh(grad_output)
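# Added commentary: the forward pass binarizes the input to {0, 1} via (input > 0).float(),
# whose true gradient is zero almost everywhere; the custom backward instead passes the incoming
# gradient through clamped to [-1, 1] (hardtanh), which is the usual straight-through-estimator
# surrogate.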
class StraightThroughEstimator(nn.Module):
def __init__(self):
super(StraightThroughEstimator, self).__init__()
def forward(self, input: 'torch.Tensor'):
return straight_through_estimator(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import torch.nn.functional as F
from torch import nn
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def straight_through_estimator(input: 'torch.Tensor') ->torch.Tensor:
""" straight through estimator
>>> straight_through_estimator(torch.randn(3, 3))
tensor([[0., 1., 0.],
[0., 1., 1.],
[0., 0., 1.]])
"""
return _STE.apply(input)
class _STE(Function):
""" Straight Through Estimator
"""
@staticmethod
def forward(ctx, input: 'torch.Tensor') ->torch.Tensor:
return (input > 0).float()
@staticmethod
def backward(ctx, grad_output: 'torch.Tensor') ->torch.Tensor:
return F.hardtanh(grad_output)
class StraightThroughEstimatorNew(nn.Module):
def __init__(self):
super(StraightThroughEstimatorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Xiangyu-Han/homura | StraightThroughEstimator | false | 5,992 | [
"Apache-2.0"
] | 1 | c366ca70b4b65f6a4809bf76926bbd926320262e | https://github.com/Xiangyu-Han/homura/tree/c366ca70b4b65f6a4809bf76926bbd926320262e | from torch.autograd import Function
import torch
import torch.nn.functional as F
from torch import nn
import torch.jit
def straight_through_estimator(input: 'torch.Tensor') ->torch.Tensor:
""" straight through estimator
>>> straight_through_estimator(torch.randn(3, 3))
tensor([[0., 1., 0.],
[0., 1., 1.],
[0., 0., 1.]])
"""
return _STE.apply(input)
class _STE(Function):
""" Straight Through Estimator
"""
@staticmethod
def forward(ctx, input: 'torch.Tensor') ->torch.Tensor:
return (input > 0).float()
@staticmethod
def backward(ctx, grad_output: 'torch.Tensor') ->torch.Tensor:
return F.hardtanh(grad_output)
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor'):
return straight_through_estimator(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GateLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xd/cxd4uci6rjegwbywzsx6cw3kvglstmvbzbgt3umstqvdwi7esmj5.py
# Topologically Sorted Source Nodes: [gate, mul, sub, mul_1, gated_emb], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# gate => sigmoid
# gated_emb => add
# mul => mul
# mul_1 => mul_1
# sub => sub
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp6 = tl.load(in_ptr2 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_input], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate, mul, sub, mul_1, gated_emb], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_1.run(buf3, primals_1, primals_2, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, primals_1, primals_2, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf1, buf3, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class GateLayer(nn.Module):
def __init__(self, input_dim):
super(GateLayer, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
self._norm_layer2 = nn.Linear(input_dim, 1)
def forward(self, input1, input2):
norm_input = self._norm_layer1(torch.cat([input1, input2], dim=-1))
gate = torch.sigmoid(self._norm_layer2(norm_input))
gated_emb = gate * input1 + (1 - gate) * input2
return gated_emb
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp6 = tl.load(in_ptr2 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf3, primals_1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf4, primals_1, primals_2, reinterpret_tensor(buf0, (64, 8), (8,
1), 0), buf1, buf3, primals_5
class GateLayerNew(nn.Module):
def __init__(self, input_dim):
super(GateLayerNew, self).__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
self._norm_layer2 = nn.Linear(input_dim, 1)
def forward(self, input_0, input_1):
primals_3 = self._norm_layer1.weight
primals_4 = self._norm_layer1.bias
primals_5 = self._norm_layer2.weight
primals_6 = self._norm_layer2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Xiaolong-Qi/CRSLab | GateLayer | false | 5,993 | [
"MIT"
] | 1 | d507378c86f4996727bf062482e1f224486d4533 | https://github.com/Xiaolong-Qi/CRSLab/tree/d507378c86f4996727bf062482e1f224486d4533 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, input_dim):
super().__init__()
self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)
self._norm_layer2 = nn.Linear(input_dim, 1)
def forward(self, input1, input2):
norm_input = self._norm_layer1(torch.cat([input1, input2], dim=-1))
gate = torch.sigmoid(self._norm_layer2(norm_input))
gated_emb = gate * input1 + (1 - gate) * input2
return gated_emb
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
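# Illustrative sketch: a minimal smoke test for the gating model above,
# assuming the `Model` class and `get_inputs` helper defined in this listing.
# The gate is a per-position scalar in (0, 1) that is broadcast over the last
# dimension, so the blended output keeps the input shape.
def _gate_layer_smoke_test():
    layer = Model(input_dim=4)
    input1, input2 = get_inputs()
    gated = layer(input1, input2)
    assert gated.shape == input1.shape == (4, 4, 4, 4)
    return gated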
|
SpatialPyramidPooling2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/eb/ceb5n3ngnbtecolkfq76e577yj7jgiajgbg2es3d3ftje6u4ouia.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ag/caged2v3ghesufrt2xaas6sdgsryky6qbfqxwx2eodbdhhnn4p7l.py
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_2 => _low_memory_max_pool2d_with_offsets_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [2, 2], [2, 2], [1, 1], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 3) % 3
x0 = xindex % 3
x2 = (xindex // 9)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2*x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tl.store(out_ptr0 + (x4), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/j5/cj5wkl65czizsr4nzr4kzsxgyhcgrezmthc2buavxem2hznh5cig.py
# Topologically Sorted Source Nodes: [res_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# res_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %view_3], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 120
x1 = (xindex // 120)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 56, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 20, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((4*x1) + x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp0 >= tmp8
tmp13 = tmp12 & tmp7
tmp14 = tl.load(in_ptr1 + ((2*(((-4) + x0) % 2)) + (8*((((-4) + x0) // 2) % 8)) + (64*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (1 + (2*(((-4) + x0) % 2)) + (8*((((-4) + x0) // 2) % 8)) + (64*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.load(in_ptr1 + (4 + (2*(((-4) + x0) % 2)) + (8*((((-4) + x0) // 2) % 8)) + (64*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.load(in_ptr1 + (5 + (2*(((-4) + x0) % 2)) + (8*((((-4) + x0) // 2) % 8)) + (64*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp13, tmp20, tmp21)
tmp23 = tl.where(tmp9, tmp11, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp7, tmp23, tmp24)
tmp26 = tmp0 >= tmp5
tmp27 = tmp26 & tmp4
tmp28 = tl.load(in_ptr2 + ((36*x1) + ((-20) + x0)), tmp27 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.where(tmp6, tmp25, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp4, tmp29, tmp30)
tmp32 = tmp0 >= tmp3
tmp33 = tl.full([1], 120, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr1 + ((64*x1) + (((-56) + x0) % 64)), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.where(tmp4, tmp31, tmp35)
tl.store(out_ptr0 + (x2), tmp36, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(arg0_1, buf1, 144, grid=grid(144), stream=stream0)
buf2 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [res_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf0, arg0_1, buf1, buf2, 480, grid=grid(480), stream=stream0)
del arg0_1
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from math import floor
from math import ceil
import torch.nn as nn
import torch.nn.functional as F
class SpatialPyramidPooling2d(nn.Module):
"""apply spatial pyramid pooling over a 4d input(a mini-batch of 2d inputs
with additional channel dimension) as described in the paper
'Spatial Pyramid Pooling in deep convolutional Networks for visual recognition'
Args:
num_level:
pool_type: max_pool, avg_pool, Default:max_pool
By the way, the target output size is num_grid:
num_grid = 0
for i in range num_level:
num_grid += (i + 1) * (i + 1)
num_grid = num_grid * channels # channels is the channel dimension of input data
examples:
>>> input = torch.randn((1,3,32,32), dtype=torch.float32)
>>> net = torch.nn.Sequential(nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,stride=1),\\
nn.ReLU(),\\
SpatialPyramidPooling2d(num_level=2,pool_type='avg_pool'),\\
nn.Linear(32 * (1*1 + 2*2), 10))
>>> output = net(input)
"""
def __init__(self, num_level, pool_type='max_pool'):
super(SpatialPyramidPooling2d, self).__init__()
self.num_level = num_level
self.pool_type = pool_type
def forward(self, x):
N, _C, H, W = x.size()
for i in range(self.num_level):
level = i + 1
kernel_size = ceil(H / level), ceil(W / level)
stride = ceil(H / level), ceil(W / level)
padding = floor((kernel_size[0] * level - H + 1) / 2), floor((
kernel_size[1] * level - W + 1) / 2)
if self.pool_type == 'max_pool':
tensor = F.max_pool2d(x, kernel_size=kernel_size, stride=
stride, padding=padding).view(N, -1)
else:
tensor = F.avg_pool2d(x, kernel_size=kernel_size, stride=
stride, padding=padding).view(N, -1)
if i == 0:
res = tensor
else:
res = torch.cat((res, tensor), 1)
return res
def __repr__(self):
return self.__class__.__name__ + '(' + 'num_level = ' + str(self.
num_level) + ', pool_type = ' + str(self.pool_type) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_level': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp10 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp23 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tl.store(out_ptr0 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 120
x1 = xindex // 120
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 56, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 20, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (4 * x1 + x0), tmp10 & xmask, eviction_policy
='evict_last', other=0.0)
tmp12 = tmp0 >= tmp8
tmp13 = tmp12 & tmp7
tmp14 = tl.load(in_ptr1 + (2 * ((-4 + x0) % 2) + 8 * ((-4 + x0) // 2 %
8) + 64 * x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (1 + 2 * ((-4 + x0) % 2) + 8 * ((-4 + x0) //
2 % 8) + 64 * x1), tmp13 & xmask, eviction_policy='evict_last',
other=0.0)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.load(in_ptr1 + (4 + 2 * ((-4 + x0) % 2) + 8 * ((-4 + x0) //
2 % 8) + 64 * x1), tmp13 & xmask, eviction_policy='evict_last',
other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.load(in_ptr1 + (5 + 2 * ((-4 + x0) % 2) + 8 * ((-4 + x0) //
2 % 8) + 64 * x1), tmp13 & xmask, eviction_policy='evict_last',
other=0.0)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp13, tmp20, tmp21)
tmp23 = tl.where(tmp9, tmp11, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp7, tmp23, tmp24)
tmp26 = tmp0 >= tmp5
tmp27 = tmp26 & tmp4
tmp28 = tl.load(in_ptr2 + (36 * x1 + (-20 + x0)), tmp27 & xmask,
eviction_policy='evict_last', other=0.0)
tmp29 = tl.where(tmp6, tmp25, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp4, tmp29, tmp30)
tmp32 = tmp0 >= tmp3
tl.full([1], 120, tl.int64)
tmp35 = tl.load(in_ptr1 + (64 * x1 + (-56 + x0) % 64), tmp32 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.where(tmp4, tmp31, tmp35)
tl.store(out_ptr0 + x2, tmp36, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(144)](arg0_1, buf1,
144, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
triton_poi_fused_cat_2[grid(480)](buf0, arg0_1, buf1, buf2, 480,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf0
del buf1
return buf2,
class SpatialPyramidPooling2dNew(nn.Module):
"""apply spatial pyramid pooling over a 4d input(a mini-batch of 2d inputs
with additional channel dimension) as described in the paper
'Spatial Pyramid Pooling in deep convolutional Networks for visual recognition'
Args:
num_level:
pool_type: max_pool, avg_pool, Default:max_pool
By the way, the target output size is num_grid:
num_grid = 0
for i in range num_level:
num_grid += (i + 1) * (i + 1)
num_grid = num_grid * channels # channels is the channel dimension of input data
examples:
>>> input = torch.randn((1,3,32,32), dtype=torch.float32)
>>> net = torch.nn.Sequential(nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,stride=1),\\
nn.ReLU(),\\
SpatialPyramidPooling2d(num_level=2,pool_type='avg_pool'),\\
nn.Linear(32 * (1*1 + 2*2), 10))
>>> output = net(input)
"""
def __init__(self, num_level, pool_type='max_pool'):
super(SpatialPyramidPooling2dNew, self).__init__()
self.num_level = num_level
self.pool_type = pool_type
def __repr__(self):
return self.__class__.__name__ + '(' + 'num_level = ' + str(self.
num_level) + ', pool_type = ' + str(self.pool_type) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Wyattwwwww/CS172_Visualized-Sanitation-Evaluator-in-Microenvironment | SpatialPyramidPooling2d | false | 5,994 | [
"MIT"
] | 1 | 02880a0698f262aad65639e8de52349fdb610355 | https://github.com/Wyattwwwww/CS172_Visualized-Sanitation-Evaluator-in-Microenvironment/tree/02880a0698f262aad65639e8de52349fdb610355 | import torch
from math import floor
from math import ceil
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""apply spatial pyramid pooling over a 4d input(a mini-batch of 2d inputs
with additional channel dimension) as described in the paper
'Spatial Pyramid Pooling in deep convolutional Networks for visual recognition'
Args:
num_level:
pool_type: max_pool, avg_pool, Default:max_pool
By the way, the target output size is num_grid:
num_grid = 0
for i in range num_level:
num_grid += (i + 1) * (i + 1)
num_grid = num_grid * channels # channels is the channel dimension of input data
examples:
>>> input = torch.randn((1,3,32,32), dtype=torch.float32)
>>> net = torch.nn.Sequential(nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,stride=1),\\
nn.ReLU(),\\
SpatialPyramidPooling2d(num_level=2,pool_type='avg_pool'),\\
nn.Linear(32 * (1*1 + 2*2), 10))
>>> output = net(input)
"""
def __init__(self, num_level, pool_type='max_pool'):
super().__init__()
self.num_level = num_level
self.pool_type = pool_type
def forward(self, x):
N, _C, H, W = x.size()
for i in range(self.num_level):
level = i + 1
kernel_size = ceil(H / level), ceil(W / level)
stride = ceil(H / level), ceil(W / level)
padding = floor((kernel_size[0] * level - H + 1) / 2), floor((
kernel_size[1] * level - W + 1) / 2)
if self.pool_type == 'max_pool':
tensor = F.max_pool2d(x, kernel_size=kernel_size, stride=
stride, padding=padding).view(N, -1)
else:
tensor = F.avg_pool2d(x, kernel_size=kernel_size, stride=
stride, padding=padding).view(N, -1)
if i == 0:
res = tensor
else:
res = torch.cat((res, tensor), 1)
return res
def __repr__(self):
return self.__class__.__name__ + '(' + 'num_level = ' + str(self.
num_level) + ', pool_type = ' + str(self.pool_type) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
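# Illustrative sketch: a cross-check of the flattened output width for the
# pyramid pooling model above, assuming the `Model` class defined in this
# listing. With num_level=4 and a (4, 4, 4, 4) input, the per-channel grid
# count is 1 + 4 + 9 + 16 = 30, so the output has 4 * 30 = 120 columns,
# matching the (4, 120) result buffer the compiled wrapper allocates.
def _spp_output_width_check():
    num_level = 4
    x = torch.rand(4, 4, 4, 4)
    grids = sum((i + 1) * (i + 1) for i in range(num_level))  # 30
    out = Model(num_level)(x)
    assert out.shape == (x.shape[0], x.shape[1] * grids) == (4, 120)
    return out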
|
complex_relu_layer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4v/c4vvh3gdp7ao72pynrppqxrfxhxctxcdh4sebgh2gzu2qpcm53sb.py
# Topologically Sorted Source Nodes: [ge, mask, real_1, img_1], Original ATen: [aten.ge, aten.mul]
# Source node to ATen node mapping:
# ge => ge
# img_1 => mul_2
# mask => mul
# real_1 => mul_1
# Graph fragment:
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%select_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ge, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %select_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %select), kwargs = {})
triton_poi_fused_ge_mul_0 = async_compile.triton('triton_poi_fused_ge_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ge_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ge_mul_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 * tmp0
tmp8 = tmp5 * tmp7
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ge, mask, real_1, img_1], Original ATen: [aten.ge, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_ge_mul_0.run(arg0_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class complex_relu_layer(nn.Module):
def __init__(self):
super(complex_relu_layer, self).__init__()
def complex_relu(self, real, img):
mask = 1.0 * (real >= 0)
return mask * real, mask * img
def forward(self, real, img=None):
if img is None:
img = real[1]
real = real[0]
real, img = self.complex_relu(real, img)
return real, img
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_ge_mul_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp7 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 * tmp0
tmp8 = tmp5 * tmp7
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_ge_mul_0[grid(64)](arg0_1, buf0, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0, buf1
class complex_relu_layerNew(nn.Module):
def __init__(self):
super(complex_relu_layerNew, self).__init__()
def complex_relu(self, real, img):
mask = 1.0 * (real >= 0)
return mask * real, mask * img
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| XitongZhang1994/SimpleMagNet | complex_relu_layer | false | 5,995 | [
"MIT"
] | 1 | d3df7a2f528474214b7d396ea9831db3aa280090 | https://github.com/XitongZhang1994/SimpleMagNet/tree/d3df7a2f528474214b7d396ea9831db3aa280090 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def complex_relu(self, real, img):
mask = 1.0 * (real >= 0)
return mask * real, mask * img
def forward(self, real, img=None):
if img is None:
img = real[1]
real = real[0]
real, img = self.complex_relu(real, img)
return real, img
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
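# Illustrative sketch: a minimal equivalence check for the complex ReLU above,
# assuming the `Model` class defined in this listing. The mask
# `1.0 * (real >= 0)` is the sign test on the real part cast to float, and it
# zeroes both the real and imaginary parts at the same positions.
def _complex_relu_check():
    real, img = torch.randn(4, 4), torch.randn(4, 4)
    out_real, out_img = Model()(real, img)
    mask = (real >= 0).float()
    assert torch.equal(out_real, mask * real)
    assert torch.equal(out_img, mask * img)
    return out_real, out_img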
|
Discriminator2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fi/cfiw2yb3plroxcbvajqzv4r2o747ozmvnzitxofswz25dqdgjiny.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# logits => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp6 = tl.load(in_ptr1 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp11 & xmask, other=0.0)
tmp15 = tmp14 + tmp7
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp10, tmp17)
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
buf1 = buf0
del buf0
# Topologically Sorted Source Nodes: [bilinear_1], Original ATen: [aten._trilinear]
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_2
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf1, primals_3, buf3, buf4, 128, grid=grid(128), stream=stream0)
del buf1
del buf3
del primals_3
return (buf4, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class Discriminator2(nn.Module):
def __init__(self, n_h):
super(Discriminator2, self).__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
c_x = c
sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2)
sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2)
if s_bias1 is not None:
sc_1 += s_bias1
if s_bias2 is not None:
sc_2 += s_bias2
logits = torch.cat((sc_1, sc_2), 1)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_h': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp11 & xmask,
other=0.0)
tmp15 = tmp14 + tmp7
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp10, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_4, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
buf1 = buf0
del buf0
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_5, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_2
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf1, primals_3, buf3, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del primals_3
return buf4, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0)
class Discriminator2New(nn.Module):
def __init__(self, n_h):
super(Discriminator2New, self).__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1, input_2):
primals_2 = self.f_k.weight
primals_3 = self.f_k.bias
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| XrosLiang/GraphCL | Discriminator2 | false | 5,996 | [
"MIT"
] | 1 | fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | https://github.com/XrosLiang/GraphCL/tree/fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | import torch
import torch.utils.data
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_h):
super().__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
c_x = c
sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2)
sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2)
if s_bias1 is not None:
sc_1 += s_bias1
if s_bias2 is not None:
sc_2 += s_bias2
logits = torch.cat((sc_1, sc_2), 1)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4]
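# Illustrative sketch: a shape check for the bilinear discriminator above,
# assuming the `Model` class and `get_inputs` helper defined in this listing.
# Each (embedding, summary) pair is scored with the shared bilinear form
# x1^T W x2 + b, and the positive and negative score blocks, each of shape
# (4, 4, 4, 1), are concatenated along dim 1.
def _discriminator_shape_check():
    disc = Model(4)
    c, h_pl, h_mi = get_inputs()
    logits = disc(c, h_pl, h_mi)
    assert logits.shape == (4, 8, 4, 1)
    return logits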
|
PartialConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jv/cjvpm2uzvzcxov2utrsp43izzl2pj4o6udmrwrqavm2egwusqpcr.py
# Topologically Sorted Source Nodes: [ones, output_mask], Original ATen: [aten.ones, aten.convolution]
# Source node to ATen node mapping:
# ones => full_default
# output_mask => convolution_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 1, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%full_default, %primals_4, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_ones_0 = async_compile.triton('triton_poi_fused_convolution_ones_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_ones_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_ones_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ra/cram4l4wpjagvdf6lud7d34utcsv7powyv74xnpasaksrkeuee45.py
# Topologically Sorted Source Nodes: [output, output_mask_1, sub, mul, output_1], Original ATen: [aten.convolution, aten.reciprocal, aten.mul, aten.sub, aten.add]
# Source node to ATen node mapping:
# mul => mul_1
# output => convolution
# output_1 => add
# output_mask_1 => mul, reciprocal
# sub => sub
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 16), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %expand), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mul), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %expand), kwargs = {})
triton_poi_fused_add_convolution_mul_reciprocal_sub_1 = async_compile.triton('triton_poi_fused_add_convolution_mul_reciprocal_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_reciprocal_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_reciprocal_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tmp2 - tmp1
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = 16.0
tmp9 = tmp7 * tmp8
tmp10 = tmp3 * tmp9
tmp11 = tmp10 + tmp1
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ones, output_mask], Original ATen: [aten.ones, aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_ones_0.run(buf1, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [ones, output_mask], Original ATen: [aten.ones, aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 1, 1, 1), (1, 1, 1, 1))
del buf1
del primals_4
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output, output_mask_1, sub, mul, output_1], Original ATen: [aten.convolution, aten.reciprocal, aten.mul, aten.sub, aten.add]
triton_poi_fused_add_convolution_mul_reciprocal_sub_1.run(buf3, primals_2, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf3, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(PartialConv, self).__init__()
self.feature_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(1, 1, kernel_size, stride, padding,
dilation, groups, bias=False)
self.window_size = self.mask_conv.kernel_size[0
] * self.mask_conv.kernel_size[1]
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, x):
output = self.feature_conv(x)
if self.feature_conv.bias is not None:
output_bias = self.feature_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output, device=x.device)
with torch.no_grad():
ones = torch.ones(1, 1, x.size(2), x.size(3), device=x.device)
output_mask = self.mask_conv(ones)
output_mask = self.window_size / output_mask
output = (output - output_bias) * output_mask + output_bias
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
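# Minimal usage sketch (illustrative addition, not part of the original source):
# CPU-only check of the partial-convolution renormalization. The output is rescaled by
# window_size / mask_sum (a no-op here because there is no padding, but it compensates
# border effects once padding is used).
if __name__ == "__main__":
    layer = PartialConv(in_channels=4, out_channels=4, kernel_size=4)
    y = layer(torch.rand(4, 4, 4, 4))
    print(y.shape)  # torch.Size([4, 4, 1, 1]) for a 4x4 input, 4x4 kernel, no padding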
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
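# The kernel below fills its 16-element output buffer with 1.0; `call` uses it to
# materialize the all-ones (1, 1, 4, 4) mask that is then fed to the mask convolution.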
@triton.jit
def triton_poi_fused_convolution_ones_0(out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
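# The kernel below is the fused partial-conv epilogue: in_out_ptr0 holds the bias-free
# convolution result, in_ptr0 the bias, and in_ptr1 the mask-convolution sum, so it computes
# conv * (16.0 / mask_sum) + bias, i.e. (output - bias) * (window_size / mask) + bias.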
@triton.jit
def triton_poi_fused_add_convolution_mul_reciprocal_sub_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tmp2 - tmp1
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = 16.0
tmp9 = tmp7 * tmp8
tmp10 = tmp3 * tmp9
tmp11 = tmp10 + tmp1
tl.store(in_out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_ones_0[grid(16)](buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 1, 1, 1), (1, 1, 1, 1))
del buf1
del primals_4
buf3 = buf0
del buf0
triton_poi_fused_add_convolution_mul_reciprocal_sub_1[grid(16)](buf3,
primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf3, primals_1, primals_3, buf2
class PartialConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(PartialConvNew, self).__init__()
self.feature_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(1, 1, kernel_size, stride, padding,
dilation, groups, bias=False)
self.window_size = self.mask_conv.kernel_size[0
] * self.mask_conv.kernel_size[1]
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input_0):
primals_1 = self.feature_conv.weight
primals_2 = self.feature_conv.bias
primals_4 = self.mask_conv.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
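# Minimal usage sketch (illustrative addition, not generated by Inductor): the wrapper is a
# drop-in replacement for the eager module's forward, but `call` pins all buffers and kernels
# to 'cuda:0' and asserts a (4, 4, 4, 4) input, so it can only run on a CUDA device.
if __name__ == "__main__" and torch.cuda.is_available():
    m = PartialConvNew(4, 4, 4).cuda()
    out = m(torch.rand(4, 4, 4, 4, device="cuda"))
    print(out.shape)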
| XiaoSanGit/talking-head-anime-landing | PartialConv | false | 5,997 | [
"MIT"
] | 1 | 36dbf1b8aef7357cda2a3524cb0c533f32670394 | https://github.com/XiaoSanGit/talking-head-anime-landing/tree/36dbf1b8aef7357cda2a3524cb0c533f32670394 | import torch
import torch.nn as nn
import torch.onnx
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.feature_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(1, 1, kernel_size, stride, padding,
dilation, groups, bias=False)
self.window_size = self.mask_conv.kernel_size[0
] * self.mask_conv.kernel_size[1]
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, x):
output = self.feature_conv(x)
if self.feature_conv.bias is not None:
output_bias = self.feature_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output, device=x.device)
with torch.no_grad():
ones = torch.ones(1, 1, x.size(2), x.size(3), device=x.device)
output_mask = self.mask_conv(ones)
output_mask = self.window_size / output_mask
output = (output - output_bias) * output_mask + output_bias
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Unfold | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fv/cfvbrurv5lz3rkzaxqwxcrh64fbuhdx3wo5d5st37jjpzi5mju5t.py
# Topologically Sorted Source Nodes: [unfold, x], Original ATen: [aten.im2col, aten.view]
# Source node to ATen node mapping:
# unfold => index
# x => view_2
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, None, %unsqueeze_5, %add_1]), kwargs = {})
# %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%permute_1, [4, -1, 4, 4]), kwargs = {})
triton_poi_fused_im2col_view_0 = async_compile.triton('triton_poi_fused_im2col_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_im2col_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_im2col_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tl.store(in_out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [unfold, x], Original ATen: [aten.im2col, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_im2col_view_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class Unfold(torch.nn.Module):
"""Module for unfolding tensor.
Performs strided crops on 2d (image) tensors. Stride is assumed to be half the crop size.
"""
def __init__(self, img_size, fold_size):
"""
Args:
img_size: Input size.
fold_size: Crop size.
"""
super().__init__()
fold_stride = fold_size // 2
self.fold_size = fold_size
self.fold_stride = fold_stride
self.n_locs = 2 * (img_size // fold_size) - 1
self.unfold = torch.nn.Unfold((self.fold_size, self.fold_size),
stride=(self.fold_stride, self.fold_stride))
def forward(self, x):
"""Unfolds tensor.
Args:
x: Input tensor.
Returns:
torch.Tensor: Unfolded tensor.
"""
N = x.size(0)
x = self.unfold(x).reshape(N, -1, self.fold_size, self.fold_size,
self.n_locs * self.n_locs).permute(0, 4, 1, 2, 3).reshape(N *
self.n_locs * self.n_locs, -1, self.fold_size, self.fold_size)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'img_size': 4, 'fold_size': 4}]
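# Minimal usage sketch (illustrative addition, not part of the original source):
# with img_size=8 and fold_size=4 the module takes 3 x 3 = 9 half-overlapping 4x4 crops
# per image (stride = fold_size // 2 = 2) and stacks them along the batch dimension.
if __name__ == "__main__":
    unfold = Unfold(img_size=8, fold_size=4)
    crops = unfold(torch.rand(2, 3, 8, 8))
    print(crops.shape)  # torch.Size([18, 3, 4, 4]) == (2 * 9, C, fold_size, fold_size)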
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_im2col_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask)
tl.store(in_out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1),
torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_im2col_view_0[grid(256)](buf1, arg0_1, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class UnfoldNew(torch.nn.Module):
"""Module for unfolding tensor.
Performs strided crops on 2d (image) tensors. Stride is assumed to be half the crop size.
"""
def __init__(self, img_size, fold_size):
"""
Args:
img_size: Input size.
fold_size: Crop size.
"""
super().__init__()
fold_stride = fold_size // 2
self.fold_size = fold_size
self.fold_stride = fold_stride
self.n_locs = 2 * (img_size // fold_size) - 1
self.unfold = torch.nn.Unfold((self.fold_size, self.fold_size),
stride=(self.fold_stride, self.fold_stride))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| XrosLiang/GraphCL | Unfold | false | 5,998 | [
"MIT"
] | 1 | fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | https://github.com/XrosLiang/GraphCL/tree/fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | import torch
import torch.utils.data
class Model(torch.nn.Module):
"""Module for unfolding tensor.
Performs strided crops on 2d (image) tensors. Stride is assumed to be half the crop size.
"""
def __init__(self, img_size, fold_size):
"""
Args:
img_size: Input size.
fold_size: Crop size.
"""
super().__init__()
fold_stride = fold_size // 2
self.fold_size = fold_size
self.fold_stride = fold_stride
self.n_locs = 2 * (img_size // fold_size) - 1
self.unfold = torch.nn.Unfold((self.fold_size, self.fold_size),
stride=(self.fold_stride, self.fold_stride))
def forward(self, x):
"""Unfolds tensor.
Args:
x: Input tensor.
Returns:
torch.Tensor: Unfolded tensor.
"""
N = x.size(0)
x = self.unfold(x).reshape(N, -1, self.fold_size, self.fold_size,
self.n_locs * self.n_locs).permute(0, 4, 1, 2, 3).reshape(N *
self.n_locs * self.n_locs, -1, self.fold_size, self.fold_size)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SelfAttentionBatch | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/er/cer6wyrsysb27mf4qvndrgb67as5vdr6kshph65tjwxcgctyw35g.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mm,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vm/cvmz5zb5hklcqpb7jp3aicaos5mk3fnzhheoduici43zwr4y2zyd.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf1, primals_3, out=buf2)
buf5 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf2, buf5, 1, 4, grid=grid(1), stream=stream0)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0), primals_2, out=buf6)
del buf5
return (reinterpret_tensor(buf6, (4, ), (1, ), 0), buf1, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class SelfAttentionBatch(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionBatch, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)),
requires_grad=True)
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad
=True)
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, h):
e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
dim=1)
attention = F.softmax(e, dim=0)
return torch.matmul(attention, h)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'da': 4}]
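# Minimal usage sketch (illustrative addition, not part of the original source):
# the module scores each row of h with tanh(h @ a) @ b, softmaxes the scores over the
# batch dimension, and returns the attention-weighted sum of the rows.
if __name__ == "__main__":
    att = SelfAttentionBatch(dim=8, da=4)
    h = torch.rand(5, 8)  # 5 items, 8-dimensional each
    pooled = att(h)
    print(pooled.shape)  # torch.Size([8]): one attention-pooled vector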
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
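# The kernel below is a numerically stable softmax over the four attention scores:
# it subtracts the maximum, exponentiates, and normalizes by the sum in a single pass.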
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0),
primals_2, out=buf6)
del buf5
return reinterpret_tensor(buf6, (4,), (1,), 0
), buf1, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)
class SelfAttentionBatchNew(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionBatchNew, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)),
requires_grad=True)
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad
=True)
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
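# Minimal usage sketch (illustrative addition, not generated by Inductor): the compiled
# wrapper expects a (4, 4) input (see the assert_size_stride checks in `call`) and a CUDA
# device; it returns the attention-pooled 4-vector.
if __name__ == "__main__" and torch.cuda.is_available():
    att = SelfAttentionBatchNew(dim=4, da=4).cuda()
    pooled = att(torch.rand(4, 4, device="cuda"))
    print(pooled.shape)  # torch.Size([4])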
| Xiaolong-Qi/CRSLab | SelfAttentionBatch | false | 5,999 | [
"MIT"
] | 1 | d507378c86f4996727bf062482e1f224486d4533 | https://github.com/Xiaolong-Qi/CRSLab/tree/d507378c86f4996727bf062482e1f224486d4533 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super().__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)),
requires_grad=True)
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)), requires_grad
=True)
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, h):
e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
dim=1)
attention = F.softmax(e, dim=0)
return torch.matmul(attention, h)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
PriorDiscriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7z/c7zsuucunqdovb2xa6tywxjxwmolzjzdk72ratro7fi3qvgyqb7c.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf5, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class PriorDiscriminator(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.l0 = nn.Linear(input_dim, input_dim)
self.l1 = nn.Linear(input_dim, input_dim)
self.l2 = nn.Linear(input_dim, 1)
def forward(self, x):
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return torch.sigmoid(self.l2(h))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
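# Minimal usage sketch (illustrative addition, not part of the original source):
# two ReLU layers followed by a sigmoid head map each feature vector to a scalar in (0, 1).
if __name__ == "__main__":
    d = PriorDiscriminator(input_dim=4)
    p = d(torch.rand(10, 4))
    print(p.shape, float(p.min()), float(p.max()))  # torch.Size([10, 1]), values inside (0, 1)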
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_sigmoid_1[grid(64)](buf5, primals_7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class PriorDiscriminatorNew(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.l0 = nn.Linear(input_dim, input_dim)
self.l1 = nn.Linear(input_dim, input_dim)
self.l2 = nn.Linear(input_dim, 1)
def forward(self, input_0):
primals_1 = self.l0.weight
primals_2 = self.l0.bias
primals_4 = self.l1.weight
primals_5 = self.l1.bias
primals_6 = self.l2.weight
primals_7 = self.l2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| XrosLiang/GraphCL | PriorDiscriminator | false | 6,000 | [
"MIT"
] | 1 | fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | https://github.com/XrosLiang/GraphCL/tree/fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.l0 = nn.Linear(input_dim, input_dim)
self.l1 = nn.Linear(input_dim, input_dim)
self.l2 = nn.Linear(input_dim, 1)
def forward(self, x):
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return torch.sigmoid(self.l2(h))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
L1_Charbonnier_loss_color | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xh/cxh5cqwbp7qibxa2lcevnqg2cyp2zfkficnrfhc5hdioyttmdvug.py
# Topologically Sorted Source Nodes: [neg, diff, diff_sq, diff_sq_color, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.mean, aten.sqrt]
# Source node to ATen node mapping:
# add_1 => add_1
# diff => add
# diff_sq => mul
# diff_sq_color => mean
# error => sqrt
# loss => mean_1
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mean_mul_neg_sqrt_0 = async_compile.triton('triton_per_fused_add_mean_mul_neg_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_neg_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp6 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp11 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp12 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp17 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp18 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp7 = -tmp6
tmp8 = tmp5 + tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = -tmp12
tmp14 = tmp11 + tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp19 = -tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-06
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = 64.0
tmp32 = tmp30 / tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diff, diff_sq, diff_sq_color, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.mean, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_sqrt_0.run(buf1, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch.nn.modules.loss import _Loss
class L1_Charbonnier_loss_color(_Loss):
"""
    L1 Charbonnier loss (color)
"""
def __init__(self, para):
super(L1_Charbonnier_loss_color, self).__init__()
self.eps = 0.001
def forward(self, X, Y):
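        # Charbonnier (smooth-L1) penalty: square the difference, average it
        # over the channel dimension, add eps^2 before the sqrt, then reduce
        # the per-position errors to a single scalar mean.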
diff = torch.add(X, -Y)
diff_sq = diff * diff
diff_sq_color = torch.mean(diff_sq, 1, True)
error = torch.sqrt(diff_sq_color + self.eps * self.eps)
loss = torch.mean(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'para': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_neg_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
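    # Fused kernel: for each of the 64 (batch, spatial) positions, sum the
    # squared X-Y difference over the 4 channel offsets, divide by 4 to get
    # the channel mean, add eps^2 = 1e-6, take the sqrt, and reduce the 64
    # Charbonnier values to their mean in one block.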
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp6 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp18 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp7 = -tmp6
tmp8 = tmp5 + tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = -tmp12
tmp14 = tmp11 + tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp19 = -tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-06
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = 64.0
tmp32 = tmp30 / tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_sqrt_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1_Charbonnier_loss_colorNew(_Loss):
"""
    L1 Charbonnier loss (color)
"""
def __init__(self, para):
super(L1_Charbonnier_loss_colorNew, self).__init__()
self.eps = 0.001
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| YDDDDG/3D2Unet | L1_Charbonnier_loss_color | false | 6,001 | [
"MIT"
] | 1 | daca056958fb2ae319dc18a350e04b3cefe0d99f | https://github.com/YDDDDG/3D2Unet/tree/daca056958fb2ae319dc18a350e04b3cefe0d99f | import torch
import torch.utils.data
from torch.nn.modules.loss import _Loss
class Model(_Loss):
"""
    L1 Charbonnier loss (color)
"""
def __init__(self, para):
super().__init__()
self.eps = 0.001
def forward(self, X, Y):
diff = torch.add(X, -Y)
diff_sq = diff * diff
diff_sq_color = torch.mean(diff_sq, 1, True)
error = torch.sqrt(diff_sq_color + self.eps * self.eps)
loss = torch.mean(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
L1_Charbonnier_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/em/cemopf37cajc2ye56a3n75dw2wrilpxcfiwvulkl63jui2orhf5e.py
# Topologically Sorted Source Nodes: [neg, diff, mul, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# diff => add
# error => sqrt
# loss => mean
# mul => mul
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mean_mul_neg_sqrt_0 = async_compile.triton('triton_per_fused_add_mean_mul_neg_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_neg_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diff, mul, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_sqrt_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch.nn.modules.loss import _Loss
class L1_Charbonnier_loss(_Loss):
"""
    L1 Charbonnier loss
"""
def __init__(self, para):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 0.001
def forward(self, X, Y):
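        # Per-element Charbonnier penalty sqrt((X - Y)^2 + eps^2), averaged
        # over every element of the input tensors.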
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps * self.eps)
loss = torch.mean(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'para': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_neg_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
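    # Single-block fused kernel: compute sqrt((x - y)^2 + 1e-6) for all 256
    # elements and reduce them to their mean.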
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_sqrt_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1_Charbonnier_lossNew(_Loss):
"""
    L1 Charbonnier loss
"""
def __init__(self, para):
super(L1_Charbonnier_lossNew, self).__init__()
self.eps = 0.001
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| YDDDDG/3D2Unet | L1_Charbonnier_loss | false | 6,002 | [
"MIT"
] | 1 | daca056958fb2ae319dc18a350e04b3cefe0d99f | https://github.com/YDDDDG/3D2Unet/tree/daca056958fb2ae319dc18a350e04b3cefe0d99f | import torch
import torch.utils.data
from torch.nn.modules.loss import _Loss
class Model(_Loss):
"""
    L1 Charbonnier loss
"""
def __init__(self, para):
super().__init__()
self.eps = 0.001
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps * self.eps)
loss = torch.mean(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4s/c4szhh35rbtilv4zlnanegnq2hofrkvv7yac3nsynw6qjxjbg3tg.py
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# bilinear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3a/c3a7jkhbifqenis2g23pmqmidmnwe5he2pwgbrunksukoz44fhmm.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# logits => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp6 = tl.load(in_ptr1 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp14 + tmp7
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp10, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
buf2 = buf1
del buf1
# Topologically Sorted Source Nodes: [bilinear_1], Original ATen: [aten._trilinear]
buf3 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, primals_4, buf4, buf5, 32, grid=grid(32), stream=stream0)
del buf2
del buf4
del primals_4
return (buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, n_h):
super(Discriminator, self).__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
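        # Expand the vector c to the shape of the embeddings, score the
        # positive (h_pl) and negative (h_mi) samples with the shared
        # bilinear layer, optionally add the score biases, and concatenate
        # both score sets into a single logit tensor.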
c_x = torch.unsqueeze(c, 1)
c_x = c_x.expand_as(h_pl)
sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2)
sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2)
if s_bias1 is not None:
sc_1 += s_bias1
if s_bias2 is not None:
sc_2 += s_bias2
logits = torch.cat((sc_1, sc_2), 1)
return logits
def get_inputs():
return [torch.rand([4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_h': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
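    # Materialize the expanded view of the 4-element vector: every output
    # position (i, j, k) copies element j of the input.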
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
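    # Concatenate the two (4, 4) bilinear score tensors along dim 1, adding
    # the scalar bilinear bias to every element, to form the (4, 8) logits.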
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tmp14 + tmp7
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp10, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_2, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(
buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
buf2 = buf1
del buf1
buf3 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_5, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor(
buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](buf2, primals_4, buf4, buf5, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del buf2
del buf4
del primals_4
return buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_5, (16, 4), (4, 1), 0)
class DiscriminatorNew(nn.Module):
def __init__(self, n_h):
super(DiscriminatorNew, self).__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1, input_2):
primals_3 = self.f_k.weight
primals_4 = self.f_k.bias
primals_1 = input_0
primals_2 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| XrosLiang/GraphCL | Discriminator | false | 6,003 | [
"MIT"
] | 1 | fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | https://github.com/XrosLiang/GraphCL/tree/fdf9fabcdaddbc17e5c8b7ac9e9d2bdfe4acc56c | import torch
import torch.utils.data
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_h):
super().__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
c_x = torch.unsqueeze(c, 1)
c_x = c_x.expand_as(h_pl)
sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2)
sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2)
if s_bias1 is not None:
sc_1 += s_bias1
if s_bias2 is not None:
sc_2 += s_bias2
logits = torch.cat((sc_1, sc_2), 1)
return logits
def get_inputs():
return [torch.rand([4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
ResNetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ye/cye7l2jaf362rrj43bugwtiqncxa3xnlfse2dg7bg4rqz2wqm2ew.py
# Topologically Sorted Source Nodes: [instance_norm, relu], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# instance_norm => add, repeat, rsqrt, var_mean
# relu => relu
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_per_fused__native_batch_norm_legit_relu_repeat_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr4 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bc/cbcwynkq5bb3y76cfpnqt7cehag2wscv6fybffrqv46ffwhr46np.py
# Topologically Sorted Source Nodes: [instance_norm_1, add], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# add => add_4
# instance_norm_1 => add_2, repeat_2, rsqrt_1, var_mean_1
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_3), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_repeat_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp18 = tl.load(in_ptr2 + (r1 + (16*x0)), xmask, other=0.0)
tmp27 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp19 = tmp1 - tmp11
tmp20 = 16.0
tmp21 = tmp17 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = tmp25 * tmp0
tmp28 = tmp26 + tmp27
tmp29 = tmp18 + tmp28
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr4 + (x0), tmp24, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm, relu], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_0.run(primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_1, add], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_repeat_1.run(primals_6, buf7, primals_2, primals_7, buf8, buf9, buf13, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_6
del primals_7
return (buf13, primals_1, primals_2, primals_5, buf0, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf6, buf7, buf8, reinterpret_tensor(buf12, (16, ), (1, ), 0), reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.onnx
from torch.nn import Conv2d
from torch.nn import InstanceNorm2d
from torch.nn.init import kaiming_normal_
from torch.nn.init import xavier_normal_
from torch import relu
def create_init_function(method: 'str'='none'):
def init(module: 'Module'):
if method == 'none':
return module
elif method == 'he':
kaiming_normal_(module.weight)
return module
elif method == 'xavier':
xavier_normal_(module.weight)
return module
else:
            raise ValueError('Invalid initialization method %s' % method)
return init
def Conv3(in_channels: 'int', out_channels: 'int', initialization_method='he'
) ->Module:
init = create_init_function(initialization_method)
return init(Conv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=False))
class ResNetBlock(Module):
def __init__(self, num_channels: 'int', initialization_method: 'str'='he'):
super().__init__()
self.conv1 = Conv3(num_channels, num_channels, initialization_method)
self.norm1 = InstanceNorm2d(num_features=num_channels, affine=True)
self.conv2 = Conv3(num_channels, num_channels, initialization_method)
self.norm2 = InstanceNorm2d(num_features=num_channels, affine=True)
def forward(self, x):
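        # Residual block: x + norm2(conv2(relu(norm1(conv1(x))))).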
return x + self.norm2(self.conv2(relu(self.norm1(self.conv1(x)))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.onnx
from torch.nn import Conv2d
from torch.nn import InstanceNorm2d
from torch.nn.init import kaiming_normal_
from torch.nn.init import xavier_normal_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
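    # Fused instance norm + ReLU: each of the 16 (batch, channel) rows is
    # normalized over its 16 spatial values, scaled and shifted by the
    # per-channel affine parameters (repeated across the batch), then
    # clamped at zero.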
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr4 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
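    # Fused instance norm + residual add: normalize each (batch, channel)
    # row of the second conv's output, apply the affine parameters, and add
    # the original block input.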
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp18 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
tmp27 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp19 = tmp1 - tmp11
tmp20 = 16.0
tmp21 = tmp17 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = tmp25 * tmp0
tmp28 = tmp26 + tmp27
tmp29 = tmp18 + tmp28
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr4 + x0, tmp24, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_0[grid(16)](
primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((16,), (1,), torch.float32)
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_repeat_1[grid(16)](
primals_6, buf7, primals_2, primals_7, buf8, buf9, buf13, buf12,
16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_6
del primals_7
return (buf13, primals_1, primals_2, primals_5, buf0, buf1,
reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf7, buf8,
reinterpret_tensor(buf12, (16,), (1,), 0), reinterpret_tensor(buf9,
(1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16,
1, 1), (16, 1, 1, 1), 0))
def create_init_function(method: 'str'='none'):
def init(module: 'Module'):
if method == 'none':
return module
elif method == 'he':
kaiming_normal_(module.weight)
return module
elif method == 'xavier':
xavier_normal_(module.weight)
return module
else:
            raise ValueError('Invalid initialization method %s' % method)
return init
def Conv3(in_channels: 'int', out_channels: 'int', initialization_method='he'
) ->Module:
init = create_init_function(initialization_method)
return init(Conv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=False))
class ResNetBlockNew(Module):
def __init__(self, num_channels: 'int', initialization_method: 'str'='he'):
super().__init__()
self.conv1 = Conv3(num_channels, num_channels, initialization_method)
self.norm1 = InstanceNorm2d(num_features=num_channels, affine=True)
self.conv2 = Conv3(num_channels, num_channels, initialization_method)
self.norm2 = InstanceNorm2d(num_features=num_channels, affine=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.norm1.weight
primals_4 = self.norm1.bias
primals_5 = self.conv2.weight
primals_6 = self.norm2.weight
primals_7 = self.norm2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| XiaoSanGit/talking-head-anime-landing | ResNetBlock | false | 6,004 | [
"MIT"
] | 1 | 36dbf1b8aef7357cda2a3524cb0c533f32670394 | https://github.com/XiaoSanGit/talking-head-anime-landing/tree/36dbf1b8aef7357cda2a3524cb0c533f32670394 | from torch.nn import Module
import torch
import torch.onnx
from torch.nn import Conv2d
from torch.nn import InstanceNorm2d
from torch.nn.init import kaiming_normal_
from torch.nn.init import xavier_normal_
from torch import relu
def create_init_function(method: 'str'='none'):
def init(module: 'Module'):
if method == 'none':
return module
elif method == 'he':
kaiming_normal_(module.weight)
return module
elif method == 'xavier':
xavier_normal_(module.weight)
return module
else:
            raise ValueError('Invalid initialization method %s' % method)
return init
def Conv3(in_channels: 'int', out_channels: 'int', initialization_method='he'
) ->Module:
init = create_init_function(initialization_method)
return init(Conv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=False))
class Model(Module):
def __init__(self, num_channels: 'int', initialization_method: 'str'='he'):
super().__init__()
self.conv1 = Conv3(num_channels, num_channels, initialization_method)
self.norm1 = InstanceNorm2d(num_features=num_channels, affine=True)
self.conv2 = Conv3(num_channels, num_channels, initialization_method)
self.norm2 = InstanceNorm2d(num_features=num_channels, affine=True)
def forward(self, x):
return x + self.norm2(self.conv2(relu(self.norm1(self.conv1(x)))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BCE_LOSS | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iq/ciq6u3cthzffche7yjsjgsixbdkejugkm5jzccbbpxzkzkqyp2pf.py
# Topologically Sorted Source Nodes: [scatter_, binary_cross_entropy_with_logits, sub, loss], Original ATen: [aten.scatter, aten.binary_cross_entropy_with_logits, aten.sub, aten.mul]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default_1, log1p, mean, minimum, mul, neg, sub_1, sub_2, sub_3
# loss => mul_1
# scatter_ => scatter_upon_const_tensor
# sub => sub
# Graph fragment:
# %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 2], background_val: 0, dtype: torch.float32, dim: 1, selector: %view, val: 1})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %scatter_upon_const_tensor), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.6931471805599453), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %sub), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %sub), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 2), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 8],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 2)
r0 = rindex % 2
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (r2), None)
tmp1 = r0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp3 - tmp5
tmp8 = 0.6931471805599453
tmp9 = tmp7 - tmp8
tmp10 = tmp6 * tmp9
tmp11 = triton_helpers.minimum(tmp4, tmp9)
tmp12 = tl_math.abs(tmp9)
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = libdevice.log1p(tmp14)
tmp16 = tmp11 - tmp15
tmp17 = tmp10 - tmp16
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 8.0
tmp22 = tmp20 / tmp21
tmp23 = 2.0
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
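# Reference sketch (added for clarity, not emitted by the compiler): an eager
# restatement of what the fused kernel above computes for this record's shapes
# (logits [4, 2], int64 labels [4]). The kernel builds the one-hot target on
# the fly (column index r0 == label[r1]), shifts the logits by
# log(C) = 0.6931471805599453 with C = 2, applies the numerically stable
# binary-cross-entropy-with-logits form
#     (1 - y) * x - (min(0, x) - log1p(exp(-|x|))),
# averages over all 8 elements and scales the mean by C = 2.
def _reference_fused_bce(logits, labels):
    import math
    import torch
    x = logits - math.log(logits.size(1))                         # shift by log(C)
    y = torch.zeros_like(x).scatter_(1, labels.view(-1, 1), 1.0)  # one-hot target
    per_elem = (1 - y) * x - (x.clamp(max=0.0) - torch.log1p(torch.exp(-x.abs())))
    return per_elem.mean() * logits.size(1)                       # mean * C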
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 2), (2, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [scatter_, binary_cross_entropy_with_logits, sub, loss], Original ATen: [aten.scatter, aten.binary_cross_entropy_with_logits, aten.sub, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0.run(buf1, arg1_1, arg0_1, 1, 8, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch.nn.modules.loss import _Loss
import torch.optim
import torch.nn
class BCE_LOSS(_Loss):
def __init__(self):
super().__init__()
self.bce_loss = torch.nn.BCEWithLogitsLoss()
def forward(self, input, label):
one_hot = torch.zeros_like(input)
C = input.size(1)
label = label.reshape(one_hot.shape[0], 1)
one_hot.scatter_(1, label, 1)
loss = self.bce_loss(input - math.log(C), one_hot) * C
return loss
def get_inputs():
return [torch.rand([4, 2]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
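# Usage sketch (added for illustration, reusing the helpers defined above):
# BCE_LOSS one-hot encodes the integer labels, shifts the logits by log(C),
# applies BCEWithLogitsLoss and scales the mean loss back up by C.
if __name__ == '__main__':
    criterion = BCE_LOSS()
    logits, labels = get_inputs()
    print(criterion(logits, labels))  # scalar loss tensor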
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
import torch.optim
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
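    # NOTE (added comment): the unassigned expressions below are side-effect-
    # free remnants of the generated index/mask setup (the simplifier dropped
    # the variables they were bound to); they can be ignored when reading.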
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 2
r0 = rindex % 2
r2 = rindex
tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + r2, None)
tmp1 = r0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp3 - tmp5
tmp8 = 0.6931471805599453
tmp9 = tmp7 - tmp8
tmp10 = tmp6 * tmp9
tmp11 = triton_helpers.minimum(tmp4, tmp9)
tmp12 = tl_math.abs(tmp9)
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = libdevice.log1p(tmp14)
tmp16 = tmp11 - tmp15
tmp17 = tmp10 - tmp16
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 8.0
tmp22 = tmp20 / tmp21
tmp23 = 2.0
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 2), (2, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 8, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCE_LOSSNew(_Loss):
def __init__(self):
super().__init__()
self.bce_loss = torch.nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
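# Usage sketch (added for illustration): the compiled path expects CUDA
# tensors with exactly the sizes and strides asserted in call(); this mirrors
# get_inputs() from the eager reference, moved to the default CUDA device.
def _run_compiled_bce_example():
    logits = torch.rand([4, 2], device='cuda')
    labels = torch.ones([4], dtype=torch.int64, device='cuda')
    return BCE_LOSSNew()(logits, labels)  # scalar loss tensor on cuda:0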
| YZW-explorer/EOD | BCE_LOSS | false | 6,005 | [
"Apache-2.0"
] | 1 | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | https://github.com/YZW-explorer/EOD/tree/f10e64de86c0f356ebf5c7e923f4042eec4207b1 | import math
import torch
from torch.nn.modules.loss import _Loss
import torch.optim
import torch.nn
class Model(_Loss):
def __init__(self):
super().__init__()
self.bce_loss = torch.nn.BCEWithLogitsLoss()
def forward(self, input, label):
one_hot = torch.zeros_like(input)
C = input.size(1)
label = label.reshape(one_hot.shape[0], 1)
one_hot.scatter_(1, label, 1)
loss = self.bce_loss(input - math.log(C), one_hot) * C
return loss
def get_inputs():
return [torch.rand([4, 2]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return []
|
PSNR | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nv/cnvhuia7crizj32ndnegecbcifyviwcvedgkhchfhsy556zzrhzu.py
# Topologically Sorted Source Nodes: [mse], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mse => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = 255.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = triton_helpers.minimum(tmp4, tmp1)
tmp6 = libdevice.nearbyint(tmp5)
tmp8 = tmp7 * tmp1
tmp9 = triton_helpers.maximum(tmp8, tmp3)
tmp10 = triton_helpers.minimum(tmp9, tmp1)
tmp11 = libdevice.nearbyint(tmp10)
tmp12 = tmp6 - tmp11
tmp13 = 0.00392156862745098
tmp14 = tmp12 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/47/c47uaghu4jphn4mfwcpdwxhwcbkl44jrwo3daxqnbvrcdihmek6i.py
# Topologically Sorted Source Nodes: [mse, log10, psnr, mean_1], Original ATen: [aten.mean, aten.log10, aten.mul]
# Source node to ATen node mapping:
# log10 => log10
# mean_1 => mean_1
# mse => mean
# psnr => mul_2
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
# %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%mean,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, -10), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_2,), kwargs = {})
triton_per_fused_log10_mean_mul_1 = async_compile.triton('triton_per_fused_log10_mean_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log10_mean_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log10_mean_mul_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = libdevice.log10(tmp2)
tmp4 = -10.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
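# Reference sketch (added for clarity, not emitted by the compiler): for the
# 4-D inputs of this record the two kernels above compute the quantised PSNR
# of the eager module. The constants are 255.0 (val_range used to rescale and
# clamp before rounding), 0.00392156862745098 = 1/255 (renormalisation of the
# difference), 64.0 (elements per image) and 4.0 (batch size for the final
# mean); in short,
#     psnr = mean_batch(-10 * log10(mean_image(((q(x) - q(y)) / 255) ** 2)))
# with q(t) = round(clamp(255 * t, 0, 255)).
def _reference_psnr(x, y):
    import torch
    q = lambda t: (t * 255.0).clamp(0, 255.0).round()
    diff = (q(x) - q(y)) / 255.0
    mse = diff.pow(2).reshape(x.size(0), -1).mean(dim=-1)
    return (-10 * mse.log10()).mean()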
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mse], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(arg0_1, arg1_1, buf0, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mse, log10, psnr, mean_1], Original ATen: [aten.mean, aten.log10, aten.mul]
triton_per_fused_log10_mean_mul_1.run(buf2, buf0, 1, 4, grid=grid(1), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch.nn.modules.loss import _Loss
def normalize_reverse(x, centralize=False, normalize=False, val_range=255.0):
if normalize:
x = x * val_range
if centralize:
x = x + val_range / 2
return x
class PSNR(_Loss):
def __init__(self, centralize=False, normalize=True):
super(PSNR, self).__init__()
self.centralize = centralize
self.normalize = normalize
self.val_range = 255
def _quantize(self, img):
        img = normalize_reverse(img, centralize=self.centralize,
            normalize=self.normalize, val_range=self.val_range)
img = img.clamp(0, self.val_range).round()
return img
def forward(self, x, y):
diff = self._quantize(x) - self._quantize(y)
if x.dim() == 3:
n = 1
elif x.dim() == 4:
n = x.size(0)
elif x.dim() == 5:
n = x.size(0) * x.size(1)
        mse = diff.div(self.val_range).pow(2).contiguous().view(n, -1).mean(dim=-1)
psnr = -10 * mse.log10()
return psnr.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
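# Usage sketch (added for illustration, reusing the helpers defined above):
# PSNR quantises both inputs to the 0-255 range, takes a per-image MSE of the
# renormalised difference and returns the batch mean of -10 * log10(MSE).
if __name__ == '__main__':
    metric = PSNR()
    x, y = get_inputs()
    print(metric(x, y))  # finite value for random inputs
    print(metric(x, x))  # identical inputs give MSE == 0, hence inf dB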
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 255.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = triton_helpers.minimum(tmp4, tmp1)
tmp6 = libdevice.nearbyint(tmp5)
tmp8 = tmp7 * tmp1
tmp9 = triton_helpers.maximum(tmp8, tmp3)
tmp10 = triton_helpers.minimum(tmp9, tmp1)
tmp11 = libdevice.nearbyint(tmp10)
tmp12 = tmp6 - tmp11
tmp13 = 0.00392156862745098
tmp14 = tmp12 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + x0, tmp19, xmask)
@triton.jit
def triton_per_fused_log10_mean_mul_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = libdevice.log10(tmp2)
tmp4 = -10.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](arg0_1, arg1_1, buf0, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
        triton_per_fused_log10_mean_mul_1[grid(1)](buf2, buf0, 1, 4,
            XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf2,
def normalize_reverse(x, centralize=False, normalize=False, val_range=255.0):
if normalize:
x = x * val_range
if centralize:
x = x + val_range / 2
return x
class PSNRNew(_Loss):
def __init__(self, centralize=False, normalize=True):
super(PSNRNew, self).__init__()
self.centralize = centralize
self.normalize = normalize
self.val_range = 255
def _quantize(self, img):
        img = normalize_reverse(img, centralize=self.centralize,
            normalize=self.normalize, val_range=self.val_range)
img = img.clamp(0, self.val_range).round()
return img
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| YDDDDG/3D2Unet | PSNR | false | 6,006 | [
"MIT"
] | 1 | daca056958fb2ae319dc18a350e04b3cefe0d99f | https://github.com/YDDDDG/3D2Unet/tree/daca056958fb2ae319dc18a350e04b3cefe0d99f | import torch
import torch.utils.data
from torch.nn.modules.loss import _Loss
def normalize_reverse(x, centralize=False, normalize=False, val_range=255.0):
if normalize:
x = x * val_range
if centralize:
x = x + val_range / 2
return x
class Model(_Loss):
def __init__(self, centralize=False, normalize=True):
super().__init__()
self.centralize = centralize
self.normalize = normalize
self.val_range = 255
def _quantize(self, img):
        img = normalize_reverse(img, centralize=self.centralize,
            normalize=self.normalize, val_range=self.val_range)
img = img.clamp(0, self.val_range).round()
return img
def forward(self, x, y):
diff = self._quantize(x) - self._quantize(y)
if x.dim() == 3:
n = 1
elif x.dim() == 4:
n = x.size(0)
elif x.dim() == 5:
n = x.size(0) * x.size(1)
        mse = diff.div(self.val_range).pow(2).contiguous().view(n, -1).mean(dim=-1)
psnr = -10 * mse.log10()
return psnr.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DownsampleA | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/po/cpormshkmgo6plpevuhstnszvw2ukqdlgn5bsa3pi3exb4taa4m5.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%avg_pool2d, %mul], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0)
tmp14 = tmp13 * tmp6
tmp15 = 0.0
tmp16 = tmp14 * tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp9, tmp18)
tl.store(out_ptr0 + (x3), tmp19, xmask)
''', device_str='cuda')
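# Reference sketch (added for clarity, not emitted by the compiler): the cat
# kernel above selects between the two concatenated branches purely from the
# flat output index; channel index x1 = (xindex // 16) % 8 routes channels
# 0-3 to the pooled input and channels 4-7 to its zeroed copy. With stride 1
# the average pooling is the identity and folds away, so this reduces to:
def _reference_downsample_a(x):
    import torch
    return torch.cat((x, torch.zeros_like(x)), dim=1)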
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nIn': 4, 'nOut': 4, 'stride': 1}]
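# Usage sketch (added for illustration, reusing the helpers defined above):
# with stride 1 the average pooling is an identity, so DownsampleA simply
# doubles the channel dimension by concatenating the input with a zero block
# (a parameter-free, "option A"-style shortcut).
if __name__ == '__main__':
    m = DownsampleA(*get_init_inputs()[0], **get_init_inputs()[1])
    out = m(get_inputs()[0])
    print(out.shape)               # torch.Size([4, 8, 4, 4])
    print(out[:, 4:].abs().sum())  # tensor(0.), the appended half is zeros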
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 &
xmask, other=0.0)
tmp14 = tmp13 * tmp6
tmp15 = 0.0
tmp16 = tmp14 * tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp9, tmp18)
tl.store(out_ptr0 + x3, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DownsampleANew(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleANew, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| YasufumiSakai/Pruning | DownsampleA | false | 6,007 | [
"BSD-3-Clause"
] | 1 | 5c8bc0d780fab41e1bd894b0360bd50e14cd0571 | https://github.com/YasufumiSakai/Pruning/tree/5c8bc0d780fab41e1bd894b0360bd50e14cd0571 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, nIn, nOut, stride):
super().__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 1]
|
Gradient | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/g4/cg4ecqzb4dmjtwbsjdl47dmonnkxdihi4rkdo72tfcvuyrt35n7o.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sqrt, %sqrt_1, %sqrt_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 36) % 3
x0 = xindex % 36
x2 = (xindex // 108)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (36*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr1 + (x0 + (36*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 1e-06
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 2, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr2 + (x0 + (36*x2)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tl.load(in_ptr3 + (x0 + (36*x2)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 + tmp10
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp18, tmp25, tmp26)
tmp28 = tmp0 >= tmp16
tmp29 = tl.full([1], 3, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr4 + (x0 + (36*x2)), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 * tmp31
tmp33 = tl.load(in_ptr5 + (x0 + (36*x2)), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp33 * tmp33
tmp35 = tmp32 + tmp34
tmp36 = tmp35 + tmp10
tmp37 = libdevice.sqrt(tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp28, tmp37, tmp38)
tmp40 = tl.where(tmp18, tmp27, tmp39)
tmp41 = tl.where(tmp4, tmp14, tmp40)
tl.store(out_ptr0 + (x3), tmp41, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x0_v], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x0_h], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x1_v], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x1_h], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x2_v], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 6, 6), (36, 36, 6, 1))
del arg1_1
# Topologically Sorted Source Nodes: [x2_h], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 6, 6), (36, 36, 6, 1))
del arg0_1
del arg2_1
buf6 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, buf2, buf3, buf4, buf5, buf6, 432, grid=grid(432), stream=stream0)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
class Gradient(nn.Module):
def __init__(self):
super(Gradient, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-06)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-06)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-06)
x = torch.cat([x0, x1, x2], dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
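# Usage sketch (added for illustration, reusing the helpers defined above):
# weight_v / weight_h are fixed central-difference filters, so Gradient
# returns a per-channel gradient magnitude sqrt(gv**2 + gh**2 + 1e-06) for
# the first three channels; with padding=2 around a 3x3 kernel each 4x4 map
# grows to 6x6.
if __name__ == '__main__':
    grad = Gradient()
    out = grad(get_inputs()[0])
    print(out.shape)  # torch.Size([4, 3, 6, 6])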
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 36 % 3
x0 = xindex % 36
x2 = xindex // 108
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 36 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr1 + (x0 + 36 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 1e-06
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 2, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr2 + (x0 + 36 * x2), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tl.load(in_ptr3 + (x0 + 36 * x2), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 + tmp10
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp18, tmp25, tmp26)
tmp28 = tmp0 >= tmp16
tl.full([1], 3, tl.int64)
tmp31 = tl.load(in_ptr4 + (x0 + 36 * x2), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 * tmp31
tmp33 = tl.load(in_ptr5 + (x0 + 36 * x2), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tmp33 * tmp33
tmp35 = tmp32 + tmp34
tmp36 = tmp35 + tmp10
tmp37 = libdevice.sqrt(tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp28, tmp37, tmp38)
tmp40 = tl.where(tmp18, tmp27, tmp39)
tmp41 = tl.where(tmp4, tmp14, tmp40)
tl.store(out_ptr0 + x3, tmp41, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 6, 6), (36, 36, 6, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 6, 6), (36, 36, 6, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 6, 6), (36, 36, 6, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 6, 6), (36, 36, 6, 1))
buf4 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 6, 6), (36, 36, 6, 1))
del arg1_1
buf5 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 6, 6), (36, 36, 6, 1))
del arg0_1
del arg2_1
buf6 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(432)](buf0, buf1, buf2, buf3, buf4,
buf5, buf6, 432, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
return buf6,
class GradientNew(nn.Module):
def __init__(self):
super(GradientNew, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, input_0):
arg1_1 = self.weight_h
arg2_1 = self.weight_v
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| YDDDDG/3D2Unet | Gradient | false | 6,008 | [
"MIT"
] | 1 | daca056958fb2ae319dc18a350e04b3cefe0d99f | https://github.com/YDDDDG/3D2Unet/tree/daca056958fb2ae319dc18a350e04b3cefe0d99f | import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-06)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-06)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-06)
x = torch.cat([x0, x1, x2], dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
L1GradientLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7g/c7gpnaa76kr65vahzryjxcg3mfyxvig5jcja3ssqewqlqwknemed.py
# Topologically Sorted Source Nodes: [x, x_1, loss], Original ATen: [aten.cat, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, mean, sub
# x => cat
# x_1 => cat_1
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sqrt, %sqrt_1, %sqrt_2], 1), kwargs = {})
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sqrt_3, %sqrt_4, %sqrt_5], 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, %cat_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_abs_cat_mean_sub_0 = async_compile.triton('triton_per_fused_abs_cat_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {13: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14), equal_to_1=(13,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_cat_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_cat_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 432
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r1 = (rindex // 36) % 3
r0 = rindex % 36
r2 = (rindex // 108)
r3 = rindex
tmp0 = r1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 1e-06
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 2, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr2 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tl.load(in_ptr3 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 + tmp10
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp18, tmp25, tmp26)
tmp28 = tmp0 >= tmp16
tmp29 = tl.full([1], 3, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr4 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 * tmp31
tmp33 = tl.load(in_ptr5 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp34 = tmp33 * tmp33
tmp35 = tmp32 + tmp34
tmp36 = tmp35 + tmp10
tmp37 = libdevice.sqrt(tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp28, tmp37, tmp38)
tmp40 = tl.where(tmp18, tmp27, tmp39)
tmp41 = tl.where(tmp4, tmp14, tmp40)
tmp42 = tl.load(in_ptr6 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp4, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 * tmp42
tmp44 = tl.load(in_ptr7 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp4, eviction_policy='evict_last', other=0.0)
tmp45 = tmp44 * tmp44
tmp46 = tmp43 + tmp45
tmp47 = tmp46 + tmp10
tmp48 = libdevice.sqrt(tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp4, tmp48, tmp49)
tmp51 = tl.load(in_ptr8 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp52 = tmp51 * tmp51
tmp53 = tl.load(in_ptr9 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp54 = tmp53 * tmp53
tmp55 = tmp52 + tmp54
tmp56 = tmp55 + tmp10
tmp57 = libdevice.sqrt(tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp18, tmp57, tmp58)
tmp60 = tl.load(in_ptr10 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp61 = tmp60 * tmp60
tmp62 = tl.load(in_ptr11 + (tl.broadcast_to(r0 + (36*r2), [RBLOCK])), rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp63 = tmp62 * tmp62
tmp64 = tmp61 + tmp63
tmp65 = tmp64 + tmp10
tmp66 = libdevice.sqrt(tmp65)
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp28, tmp66, tmp67)
tmp69 = tl.where(tmp18, tmp59, tmp68)
tmp70 = tl.where(tmp4, tmp50, tmp69)
tmp71 = tmp41 - tmp70
tmp72 = tl_math.abs(tmp71)
tmp73 = tl.broadcast_to(tmp72, [RBLOCK])
tmp75 = tl.where(rmask, tmp73, 0)
tmp76 = triton_helpers.promote_to_tensor(tl.sum(tmp75, 0))
tmp77 = 432.0
tmp78 = tmp76 / tmp77
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp78, None)
''', device_str='cuda')
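# Note (added comment): this kernel fuses the channel-wise concatenation of
# both gradient-magnitude maps with the L1 reduction; 432.0 is the element
# count of each 4x3x6x6 gradient map, so tmp78 is mean(|grad_x - grad_y|).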
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x0_v], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x0_h], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x1_v], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x1_h], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x2_v], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x2_h], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 6, 6), (36, 36, 6, 1))
del arg0_1
# Topologically Sorted Source Nodes: [x1_h_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x2_v_1], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x2_h_1], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x0_v_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 6, 6), (36, 36, 6, 1))
# Topologically Sorted Source Nodes: [x0_h_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 1, 6, 6), (36, 36, 6, 1))
del arg2_1
# Topologically Sorted Source Nodes: [x1_v_1], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 6, 6), (36, 36, 6, 1))
del arg1_1
del arg3_1
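    # Fused epilogue: builds sqrt(v^2 + h^2 + 1e-06) per channel for both inputs,
    # then the mean absolute difference over all 432 = 4*3*6*6 values.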
buf14 = empty_strided_cuda((), (), torch.float32)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x, x_1, loss], Original ATen: [aten.cat, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_cat_mean_sub_0.run(buf15, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf8, buf9, buf10, buf11, buf12, 1, 432, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf10
del buf11
del buf12
del buf2
del buf3
del buf4
del buf5
del buf7
del buf8
del buf9
return (buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from torch.nn.modules.loss import _Loss
class Gradient(nn.Module):
def __init__(self):
super(Gradient, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
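        # Apply the vertical/horizontal central-difference kernels to each channel
        # separately (3x3 kernels with padding=2, so outputs grow by 2 pixels per dim).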
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-06)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-06)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-06)
x = torch.cat([x0, x1, x2], dim=1)
return x
class L1GradientLoss(_Loss):
"""
    Gradient loss: L1 distance between the per-channel gradient-magnitude maps of two images.
"""
def __init__(self, para):
super(L1GradientLoss, self).__init__()
self.get_grad = Gradient()
self.L1 = nn.L1Loss()
def forward(self, x, y):
grad_x = self.get_grad(x)
grad_y = self.get_grad(y)
loss = self.L1(grad_x, grad_y)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'para': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_cat_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, xnumel, rnumel):
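    # Fused cat + sqrt + sub + abs + mean: for each of the 432 = 4*3*6*6 elements,
    # select the (v, h) convolution pair for that element's channel, form
    # sqrt(v*v + h*h + 1e-06) for both inputs, and reduce mean(|grad_x - grad_y|)
    # into a single scalar.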
XBLOCK: tl.constexpr = 1
rnumel = 432
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex // 36 % 3
r0 = rindex % 36
r2 = rindex // 108
tmp0 = r1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]), rmask &
tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]), rmask &
tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 1e-06
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 2, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr2 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tl.load(in_ptr3 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 + tmp10
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp18, tmp25, tmp26)
tmp28 = tmp0 >= tmp16
tl.full([1], 3, tl.int64)
tmp31 = tl.load(in_ptr4 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 * tmp31
tmp33 = tl.load(in_ptr5 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp34 = tmp33 * tmp33
tmp35 = tmp32 + tmp34
tmp36 = tmp35 + tmp10
tmp37 = libdevice.sqrt(tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp28, tmp37, tmp38)
tmp40 = tl.where(tmp18, tmp27, tmp39)
tmp41 = tl.where(tmp4, tmp14, tmp40)
tmp42 = tl.load(in_ptr6 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp4, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 * tmp42
tmp44 = tl.load(in_ptr7 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp4, eviction_policy='evict_last', other=0.0)
tmp45 = tmp44 * tmp44
tmp46 = tmp43 + tmp45
tmp47 = tmp46 + tmp10
tmp48 = libdevice.sqrt(tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp4, tmp48, tmp49)
tmp51 = tl.load(in_ptr8 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp52 = tmp51 * tmp51
tmp53 = tl.load(in_ptr9 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp18, eviction_policy='evict_last', other=0.0)
tmp54 = tmp53 * tmp53
tmp55 = tmp52 + tmp54
tmp56 = tmp55 + tmp10
tmp57 = libdevice.sqrt(tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp18, tmp57, tmp58)
tmp60 = tl.load(in_ptr10 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp61 = tmp60 * tmp60
tmp62 = tl.load(in_ptr11 + tl.broadcast_to(r0 + 36 * r2, [RBLOCK]),
rmask & tmp28, eviction_policy='evict_last', other=0.0)
tmp63 = tmp62 * tmp62
tmp64 = tmp61 + tmp63
tmp65 = tmp64 + tmp10
tmp66 = libdevice.sqrt(tmp65)
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp28, tmp66, tmp67)
tmp69 = tl.where(tmp18, tmp59, tmp68)
tmp70 = tl.where(tmp4, tmp50, tmp69)
tmp71 = tmp41 - tmp70
tmp72 = tl_math.abs(tmp71)
tmp73 = tl.broadcast_to(tmp72, [RBLOCK])
tmp75 = tl.where(rmask, tmp73, 0)
tmp76 = triton_helpers.promote_to_tensor(tl.sum(tmp75, 0))
tmp77 = 432.0
tmp78 = tmp76 / tmp77
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp78, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
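        # Twelve 3x3 convolutions: both difference kernels applied to each of the
        # three channels of the two inputs (padding=2, so every output map is 6x6).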
buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 6, 6), (36, 36, 6, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 6, 6), (36, 36, 6, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 6, 6), (36, 36, 6, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 6, 6), (36, 36, 6, 1))
buf4 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 6, 6), (36, 36, 6, 1))
buf5 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 6, 6), (36, 36, 6, 1))
del arg0_1
buf10 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 6, 6), (36, 36, 6, 1))
buf11 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 6, 6), (36, 36, 6, 1))
buf12 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 6, 6), (36, 36, 6, 1))
buf7 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 6, 6), (36, 36, 6, 1))
buf8 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf8, (4, 1, 6, 6), (36, 36, 6, 1))
del arg2_1
buf9 = extern_kernels.convolution(reinterpret_tensor(arg3_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(2, 2
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 6, 6), (36, 36, 6, 1))
del arg1_1
del arg3_1
buf14 = empty_strided_cuda((), (), torch.float32)
buf15 = buf14
del buf14
get_raw_stream(0)
triton_per_fused_abs_cat_mean_sub_0[grid(1)](buf15, buf0, buf1,
buf2, buf3, buf4, buf5, buf7, buf8, buf9, buf10, buf11, buf12,
1, 432, num_warps=4, num_stages=1)
del buf0
del buf1
del buf10
del buf11
del buf12
del buf2
del buf3
del buf4
del buf5
del buf7
del buf8
del buf9
return buf15,
class Gradient(nn.Module):
def __init__(self):
super(Gradient, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-06)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-06)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-06)
x = torch.cat([x0, x1, x2], dim=1)
return x
class L1GradientLossNew(_Loss):
"""
    Gradient loss: L1 distance between the per-channel gradient-magnitude maps of two images.
"""
def __init__(self, para):
super(L1GradientLossNew, self).__init__()
self.get_grad = Gradient()
self.L1 = nn.L1Loss()
def forward(self, input_0, input_1):
arg1_1 = self.get_grad.weight_h
arg2_1 = self.get_grad.weight_v
arg0_1 = input_0
arg3_1 = input_1
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| YDDDDG/3D2Unet | L1GradientLoss | false | 6,009 | [
"MIT"
] | 1 | daca056958fb2ae319dc18a350e04b3cefe0d99f | https://github.com/YDDDDG/3D2Unet/tree/daca056958fb2ae319dc18a350e04b3cefe0d99f | import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from torch.nn.modules.loss import _Loss
class Gradient(nn.Module):
def __init__(self):
super().__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-06)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-06)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-06)
x = torch.cat([x0, x1, x2], dim=1)
return x
class Model(_Loss):
"""
    Gradient loss: L1 distance between the per-channel gradient-magnitude maps of two images.
"""
def __init__(self, para):
super().__init__()
self.get_grad = Gradient()
self.L1 = nn.L1Loss()
def forward(self, x, y):
grad_x = self.get_grad(x)
grad_y = self.get_grad(y)
loss = self.L1(grad_x, grad_y)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PosEnc | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dc/cdcj5vdeoxdfumt6aoglzzd64d2tfy25xaywf7azakyldey5igj7.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
class PosEnc(nn.Module):
def __init__(self, C, ks):
super().__init__()
self.weight = nn.Parameter(torch.randn(1, C, ks, ks))
def forward(self, x):
return x + self.weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'C': 4, 'ks': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
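    # Elementwise add of the (1, 4, 4, 4) positional weight (in_ptr1, indexed
    # modulo 64) onto the (4, 4, 4, 4) input, broadcasting over the batch.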
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
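        # Allocate the output and launch one fused add over all 256 elements.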
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class PosEncNew(nn.Module):
def __init__(self, C, ks):
super().__init__()
self.weight = nn.Parameter(torch.randn(1, C, ks, ks))
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| XuelianCheng/ppuda | PosEnc | false | 6,010 | [
"MIT"
] | 1 | d5b89928e430e2d5b976f84b1ea66b4b901e6cda | https://github.com/XuelianCheng/ppuda/tree/d5b89928e430e2d5b976f84b1ea66b4b901e6cda | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
class Model(nn.Module):
def __init__(self, C, ks):
super().__init__()
self.weight = nn.Parameter(torch.randn(1, C, ks, ks))
def forward(self, x):
return x + self.weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SpatialAttn | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bi/cbibg4lc7dc2sbbxe7z4v4ebtulhxgixrw57yvrp2yyfgel2ghb5.py
# Topologically Sorted Source Nodes: [sum_1, itruediv], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# itruediv => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %sum_1), kwargs = {})
triton_per_fused_div_sum_0 = async_compile.triton('triton_per_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_sum_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (16 + r0), None)
tmp3 = tl.load(in_ptr0 + (32 + r0), None)
tmp5 = tl.load(in_ptr0 + (48 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp8 / tmp11
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/o4/co4t5cpqvwzgwid4gtpymik6alub22ihe334fobcw5otrmsoxavz.py
# Topologically Sorted Source Nodes: [itruediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# itruediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %sum_1), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%view_1, %div, 0, 0), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xf/cxfi22rac3t4r7mazbza6uabzf5oenc5fn7fw2a7l7czenb5tshs.py
# Topologically Sorted Source Nodes: [sum_2, sum_3, itruediv_2], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# itruediv_2 => div_2
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_8,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_16,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_17, %sum_3), kwargs = {})
triton_per_fused_div_sum_2 = async_compile.triton('triton_per_fused_div_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_sum_2(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (r0 + (4*((r0 % 4) // 4))), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (4*((r0 % 4) // 4))), None)
tmp12 = tl.load(in_ptr0 + (r0 + (8*((r0 % 4) // 4))), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + (8*((r0 % 4) // 4))), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + (8*((r0 % 4) // 4))), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.full([1, 1], 2, tl.int32)
tmp10 = tmp9 == tmp0
tmp11 = tmp0 == tmp0
tmp14 = tl.where(tmp2, tmp12, tmp13)
tmp15 = tmp14 / tmp8
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tmp9 == tmp1
tmp19 = tl.where(tmp17, tmp12, tmp18)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tl.where(tmp10, tmp16, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp21 / tmp24
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp25, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tp/ctp65xzcaq3yt6j5igmjpdwgic52mltna4dnuwk4o2jezwaqqwcq.py
# Topologically Sorted Source Nodes: [itruediv_2], Original ATen: [aten.div]
# Source node to ATen node mapping:
# itruediv_2 => div_2
# Graph fragment:
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_17, %sum_3), kwargs = {})
# %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%view_19, %div_2, 0, 2), kwargs = {})
triton_poi_fused_div_3 = async_compile.triton('triton_poi_fused_div_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (16 + x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (0))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr1 + (x0 + (8*((x0 % 4) // 4)) + (16*x1)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = tmp4 == tmp4
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp4 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp14 = tmp11 / tmp13
tmp15 = tl.where(tmp6, tmp14, tmp11)
tmp16 = tmp0 == tmp7
tmp18 = tl.where(tmp16, tmp9, tmp17)
tmp19 = tl.where(tmp5, tmp14, tmp18)
tmp20 = tl.where(tmp5, tmp15, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wl/cwlv3v7i5tdm7rgttqnrgro6gjlltqdiyasmyr7rg7nydh3gicwq.py
# Topologically Sorted Source Nodes: [sum_4], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_4 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_24,), kwargs = {})
triton_per_fused_sum_4 = async_compile.triton('triton_per_fused_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (32 + r0 + (4*((r0 % 4) // 4))), None)
tmp4 = tl.load(in_ptr0 + (48 + r0 + (4*((r0 % 4) // 4))), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tl.full([1, 1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jj/cjjqjoipyiqihfkftp47jgsxj24bv47ixmjupvxwkuttwgjj6aii.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%view_32, %select_26, 0, 3), kwargs = {})
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp6 = tl.load(in_ptr0 + (32 + x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (48 + x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (0))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (x0 + (8*((x0 % 4) // 4)) + (16*x1)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp1 == tmp4
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp11 = tmp8 / tmp10
tmp12 = tl.where(tmp3, tmp11, tmp8)
tmp13 = tmp0 == tmp4
tmp15 = tl.where(tmp13, tmp6, tmp14)
tmp16 = tl.where(tmp2, tmp11, tmp15)
tmp17 = tl.where(tmp2, tmp12, tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, itruediv], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_sum_0.run(arg0_1, buf1, 1, 16, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [itruediv], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf1, arg0_1, buf2, 64, grid=grid(64), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_2, sum_3, itruediv_2], Original ATen: [aten.sum, aten.div]
triton_per_fused_div_sum_2.run(buf2, buf3, buf5, 1, 16, grid=grid(1), stream=stream0)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [itruediv_2], Original ATen: [aten.div]
triton_poi_fused_div_3.run(buf5, buf2, buf3, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_4], Original ATen: [aten.sum]
triton_per_fused_sum_4.run(buf6, buf7, 1, 16, grid=grid(1), stream=stream0)
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
del buf6
del buf7
return (reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SpatialAttn(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super(SpatialAttn, self).__init__()
def forward(self, x):
x = x.mean(1, keepdim=True)
h = x.size(2)
w = x.size(3)
x = x.view(x.size(0), -1)
z = x
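        # z aliases x (no copy), so the per-sample division below normalizes the
        # flattened mean map in place.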
for b in range(x.size(0)):
z[b] /= torch.sum(z[b])
z = z.view(x.size(0), 1, h, w)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_sum_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
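    # Channel mean for sample 0 (sum of its four channel maps / 4), then divide by
    # the map's own sum over the 16 positions -- the b == 0 iteration of the loop.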
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (16 + r0), None)
tmp3 = tl.load(in_ptr0 + (32 + r0), None)
tmp5 = tl.load(in_ptr0 + (48 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp8 / tmp11
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
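    # Builds the flattened (4, 16) channel-mean map, taking the already normalized
    # values for sample 0 and the raw channel mean for the other samples.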
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused_div_sum_2(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
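    # Unrolled b == 1 and b == 2 iterations: writes sum(row 1) to out_ptr0 and
    # row 2 divided by its own sum to out_ptr2; the next kernel uses both.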
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (r0 + 4 * (r0 % 4 // 4)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 4 * (r0 % 4 // 4)), None)
tmp12 = tl.load(in_ptr0 + (r0 + 8 * (r0 % 4 // 4)), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + 8 * (r0 % 4 // 4)), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + 8 * (r0 % 4 // 4)), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.full([1, 1], 2, tl.int32)
tmp10 = tmp9 == tmp0
tmp11 = tmp0 == tmp0
tmp14 = tl.where(tmp2, tmp12, tmp13)
tmp15 = tmp14 / tmp8
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tmp9 == tmp1
tmp19 = tl.where(tmp17, tmp12, tmp18)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tl.where(tmp10, tmp16, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp21 / tmp24
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp25, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (16 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr1 + (x0 + 8 * (x0 % 4 // 4) + 16 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = tmp4 == tmp4
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp4 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp14 = tmp11 / tmp13
tmp15 = tl.where(tmp6, tmp14, tmp11)
tmp16 = tmp0 == tmp7
tmp18 = tl.where(tmp16, tmp9, tmp17)
tmp19 = tl.where(tmp5, tmp14, tmp18)
tmp20 = tl.where(tmp5, tmp15, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_per_fused_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
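    # Sums sample 3's row of the partially normalized map so the final kernel can
    # divide by it.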
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (32 + r0 + 4 * (r0 % 4 // 4)), None)
tmp4 = tl.load(in_ptr0 + (48 + r0 + 4 * (r0 % 4 // 4)), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tl.full([1, 1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
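    # Final assembly: rows 0-2 pass through unchanged; sample 3's row is divided
    # by the sum computed above.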
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp6 = tl.load(in_ptr0 + (32 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (48 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (x0 + 8 * (x0 % 4 // 4) + 16 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp1 == tmp4
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp11 = tmp8 / tmp10
tmp12 = tl.where(tmp3, tmp11, tmp8)
tmp13 = tmp0 == tmp4
tmp15 = tl.where(tmp13, tmp6, tmp14)
tmp16 = tl.where(tmp2, tmp11, tmp15)
tmp17 = tl.where(tmp2, tmp12, tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
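        # The Python per-sample normalization loop is unrolled into alternating
        # reduction kernels (per-sample sums / normalized rows) and pointwise
        # kernels that scatter the results back into the flattened (4, 16) map.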
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_div_sum_0[grid(1)](arg0_1, buf1, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_div_1[grid(64)](buf1, arg0_1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf1
del buf1
triton_per_fused_div_sum_2[grid(1)](buf2, buf3, buf5, 1, 16, XBLOCK
=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_div_3[grid(64)](buf5, buf2, buf3, buf6, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del buf5
buf7 = buf3
del buf3
triton_per_fused_sum_4[grid(1)](buf6, buf7, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf8 = buf2
del buf2
triton_poi_fused_5[grid(64)](buf6, buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf6
del buf7
return reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 16, 4, 1), 0),
class SpatialAttnNew(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super(SpatialAttnNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| YUE-FAN/Spatial-Attention | SpatialAttn | false | 6,011 | [
"MIT"
] | 1 | 71cf324f0fb0829355e5ca322058ebbb9d8be610 | https://github.com/YUE-FAN/Spatial-Attention/tree/71cf324f0fb0829355e5ca322058ebbb9d8be610 | import torch
from torch import nn
class Model(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super().__init__()
def forward(self, x):
x = x.mean(1, keepdim=True)
h = x.size(2)
w = x.size(3)
x = x.view(x.size(0), -1)
z = x
for b in range(x.size(0)):
z[b] /= torch.sum(z[b])
z = z.view(x.size(0), 1, h, w)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
fpn_module | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/w5/cw5skfgpv5nrk42leelcp4yltyqwex7mnbyce3n3bj4ebm2z7azu.py
# Topologically Sorted Source Nodes: [p5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# p5 => convolution
# Graph fragment:
# %convolution : [num_users=6] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
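    # In-place bias add for the p5 output: x1 selects one of the 256 channels,
    # broadcast over each 64x64 feature map and the batch of 4.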
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p2/cp2fi3pz7wgtw7k3ewpcsvsdmiyrtnshjj7ceaxpeyql6g7mwnrl.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# interpolate => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=15] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
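# Added note (not generated by Inductor): this kernel precomputes, for each of the
# 64 output positions, the floor of the bilinear source coordinate
# (max((x + 0.5) * scale - 0.5, 0)); with the matching 64 -> 64 sizes used here the
# scale is 1.0, so the stored index reduces to the identity mapping.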
# kernel path: runs/run_shard_4/inductor_cache/7c/c7cez33jguz44eoylm76d2wh2v6hokivwtceg43fbezeqagf63px.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# interpolate => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=13] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 63), kwargs = {})
triton_poi_fused_add_clamp_2 = async_compile.triton('triton_poi_fused_add_clamp_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 63, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hv/chvbryvhsclk6e7ndzaxmzi3b3jbxckwuysrcqfxtybwd77qaes4.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# interpolate => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=13] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = triton_helpers.minimum(tmp12, tmp4)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dn/cdno47sb4iuwlzp3u5bnrx5nfsdaxpfpneo66rlgu43bh4opcxpe.py
# Topologically Sorted Source Nodes: [conv2d_1, interpolate, p4], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_2, mul_3, mul_4, sub_3, sub_4, sub_6
# p4 => add_7
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
# %add_7 : [num_users=6] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %convolution_1), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096)
x6 = xindex
x3 = (xindex // 4096) % 256
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x6), None)
tmp33 = tl.load(in_ptr7 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (64*tmp4) + (4096*x2)), None, eviction_policy='evict_last')
tmp11 = tmp10 + tmp1
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr2 + (tmp13 + (64*tmp4) + (4096*x2)), None, eviction_policy='evict_last')
tmp15 = tmp14 - tmp9
tmp17 = tmp15 * tmp16
tmp18 = tmp9 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = tmp19 < 0
tmp22 = tl.where(tmp21, tmp20, tmp19)
tmp23 = tl.load(in_ptr2 + (tmp8 + (64*tmp22) + (4096*x2)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (tmp13 + (64*tmp22) + (4096*x2)), None, eviction_policy='evict_last')
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp16
tmp27 = tmp23 + tmp26
tmp28 = tmp27 - tmp18
tmp30 = tmp28 * tmp29
tmp31 = tmp18 + tmp30
tmp34 = tmp32 + tmp33
tmp35 = tmp31 + tmp34
tl.store(in_out_ptr0 + (x6), tmp35, None)
''', device_str='cuda')
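# Added note (not generated by Inductor): this fused kernel implements one
# _upsample_add step: it bilinearly samples the top-down feature map (in_ptr2)
# at the precomputed indices and weights, then adds the lateral 1x1 convolution
# output already held in in_out_ptr0 together with its bias (in_ptr7), writing
# the result back in place.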
# kernel path: runs/run_shard_4/inductor_cache/he/che3l4rv6rafbjk6jebrweq77ki4pey5sowezfrn6tmor5h7vaw6.py
# Topologically Sorted Source Nodes: [p5_1, p4_1, p3_1, p5_2, p4_2, p3_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# p3_1 => convolution_9
# p3_2 => _unsafe_index_20, _unsafe_index_21, _unsafe_index_22, _unsafe_index_23, add_42, add_43, mul_27, mul_28, mul_29, sub_38, sub_39, sub_41
# p4_1 => convolution_7
# p4_2 => _unsafe_index_16, _unsafe_index_17, _unsafe_index_18, _unsafe_index_19, add_35, add_36, mul_22, mul_23, mul_24, sub_31, sub_32, sub_34
# p5_1 => convolution_5
# p5_2 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_28, add_29, mul_17, mul_18, mul_19, sub_24, sub_25, sub_27
# Graph fragment:
# %convolution_5 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_15, %primals_16, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_7 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_6, %primals_19, %primals_20, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_9 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_23, %primals_24, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_2), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %clamp_max_2), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_29, %add_28), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_3), kwargs = {})
# %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_17, %_unsafe_index_16), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %clamp_max_2), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_16, %mul_22), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_19, %_unsafe_index_18), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %clamp_max_2), kwargs = {})
# %add_36 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_18, %mul_23), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_36, %add_35), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %clamp_max_3), kwargs = {})
# %_unsafe_index_20 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_21 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_22 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_23 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_21, %_unsafe_index_20), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %clamp_max_2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_20, %mul_27), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_23, %_unsafe_index_22), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %clamp_max_2), kwargs = {})
# %add_43 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_22, %mul_28), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_43, %add_42), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %clamp_max_3), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_5 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x5 = (xindex // 4096)
x2 = (xindex // 4096) % 128
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + (x2), None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr11 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tl.load(in_ptr8 + (tmp8 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last')
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr8 + (tmp15 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp37
tmp41 = tmp40 - tmp38
tmp42 = tmp41 * tmp19
tmp43 = tmp38 + tmp42
tmp44 = tl.load(in_ptr8 + (tmp8 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last')
tmp45 = tmp44 + tmp37
tmp46 = tl.load(in_ptr8 + (tmp15 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last')
tmp47 = tmp46 + tmp37
tmp48 = tmp47 - tmp45
tmp49 = tmp48 * tmp19
tmp50 = tmp45 + tmp49
tmp51 = tmp50 - tmp43
tmp52 = tmp51 * tmp34
tmp53 = tl.load(in_ptr10 + (tmp8 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last')
tmp55 = tmp53 + tmp54
tmp56 = tl.load(in_ptr10 + (tmp15 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last')
tmp57 = tmp56 + tmp54
tmp58 = tmp57 - tmp55
tmp59 = tmp58 * tmp19
tmp60 = tmp55 + tmp59
tmp61 = tl.load(in_ptr10 + (tmp8 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last')
tmp62 = tmp61 + tmp54
tmp63 = tl.load(in_ptr10 + (tmp15 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last')
tmp64 = tmp63 + tmp54
tmp65 = tmp64 - tmp62
tmp66 = tmp65 * tmp19
tmp67 = tmp62 + tmp66
tmp68 = tmp67 - tmp60
tmp69 = tmp68 * tmp34
tl.store(out_ptr0 + (x6), tmp21, None)
tl.store(out_ptr1 + (x6), tmp35, None)
tl.store(out_ptr2 + (x6), tmp43, None)
tl.store(out_ptr3 + (x6), tmp52, None)
tl.store(out_ptr4 + (x6), tmp60, None)
tl.store(out_ptr5 + (x6), tmp69, None)
''', device_str='cuda')
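# Added note (not generated by Inductor): this kernel evaluates the bilinear
# resampling of the three smoothed maps p5_2, p4_2 and p3_2 (with their conv
# biases folded in) and stores, for each level, the horizontally blended term
# and the weighted vertical correction in separate buffers; the concatenation
# kernel below sums each pair to finish the interpolation.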
# kernel path: runs/run_shard_4/inductor_cache/qq/cqqy6dwgchmh6da5wvexvll4djghft4xnm4crpatgerre2lzchix.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_30, %add_37, %add_44, %convolution_11], 1), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 512
x0 = xindex % 4096
x2 = (xindex // 2097152)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (524288*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + (4096*x1) + (524288*x2)), tmp4, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 256, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (4096*((-128) + x1)) + (524288*x2)), tmp13, other=0.0)
tmp15 = tl.load(in_ptr3 + (x0 + (4096*((-128) + x1)) + (524288*x2)), tmp13, other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 384, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + (4096*((-256) + x1)) + (524288*x2)), tmp22, other=0.0)
tmp24 = tl.load(in_ptr5 + (x0 + (4096*((-256) + x1)) + (524288*x2)), tmp22, other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 512, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr6 + (x0 + (4096*((-384) + x1)) + (524288*x2)), tmp28, other=0.0)
tmp32 = tl.load(in_ptr7 + ((-384) + x1), tmp28, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tl.store(out_ptr0 + (x3), tmp38, None)
''', device_str='cuda')
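# Added note (not generated by Inductor): the cat kernel assembles the
# 512-channel tensor fed to the classifier: channels 0-383 come from the three
# interpolated levels (each pair of partial buffers summed), and channels
# 384-511 are the p2_1 convolution output (in_ptr6) with its bias (in_ptr7) added.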
# kernel path: runs/run_shard_4/inductor_cache/aa/caad7kyjzosottzpivtkindhaxpfkjxgqczfry2hxfky2fvdwwvu.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output => convolution_12
# Graph fragment:
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_29, %primals_30, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args
args.clear()
assert_size_stride(primals_1, (256, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 2048, 64, 64), (8388608, 4096, 64, 1))
assert_size_stride(primals_4, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_7, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_8, (256, ), (1, ))
assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_10, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_13, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_14, (256, ), (1, ))
assert_size_stride(primals_15, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_16, (128, ), (1, ))
assert_size_stride(primals_17, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_18, (256, ), (1, ))
assert_size_stride(primals_19, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (128, ), (1, ))
assert_size_stride(primals_21, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_22, (256, ), (1, ))
assert_size_stride(primals_23, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_24, (128, ), (1, ))
assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_26, (256, ), (1, ))
assert_size_stride(primals_27, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (128, ), (1, ))
assert_size_stride(primals_29, (4, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_30, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [p5], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [p5], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 4194304, grid=grid(4194304), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_1.run(buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_2.run(buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_1.run(buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_2.run(buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf7, 64, grid=grid(64), stream=stream0)
buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf9, 64, grid=grid(64), stream=stream0)
buf10 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, interpolate, p4], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf10, buf3, buf5, buf1, buf6, buf7, buf4, buf9, primals_5, 4194304, grid=grid(4194304), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, interpolate_1, p3], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf13, buf3, buf5, buf10, buf6, buf7, buf4, buf9, primals_8, 4194304, grid=grid(4194304), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(primals_12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf16 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, interpolate_2, p2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf16, buf3, buf5, buf13, buf6, buf7, buf4, buf9, primals_11, 4194304, grid=grid(4194304), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf1, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf18, primals_14, 4194304, grid=grid(4194304), stream=stream0)
del primals_14
# Topologically Sorted Source Nodes: [p5_1], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf10, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf21, primals_18, 4194304, grid=grid(4194304), stream=stream0)
del primals_18
# Topologically Sorted Source Nodes: [p4_1], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 64, 64), (524288, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf13, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf24, primals_22, 4194304, grid=grid(4194304), stream=stream0)
del primals_22
# Topologically Sorted Source Nodes: [p3_1], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf16, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf27, primals_26, 4194304, grid=grid(4194304), stream=stream0)
del primals_26
# Topologically Sorted Source Nodes: [p2_1], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf29 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf30 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf31 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf32 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf33 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
buf34 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [p5_1, p4_1, p3_1, p5_2, p4_2, p3_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_5.run(buf3, buf5, buf19, primals_16, buf6, buf7, buf4, buf9, buf22, primals_20, buf25, primals_24, buf29, buf30, buf31, buf32, buf33, buf34, 2097152, grid=grid(2097152), stream=stream0)
del buf19
del buf22
del buf25
del primals_16
del primals_20
del primals_24
buf35 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf29, buf30, buf31, buf32, buf33, buf34, buf28, primals_28, buf35, 8388608, grid=grid(8388608), stream=stream0)
del buf28
del buf29
del buf30
del buf31
del buf32
del buf33
del buf34
del primals_28
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf37, primals_30, 65536, grid=grid(65536), stream=stream0)
del primals_30
return (buf37, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_13, primals_15, primals_17, primals_19, primals_21, primals_23, primals_25, primals_27, primals_29, buf1, buf3, buf4, buf5, buf6, buf7, buf9, buf10, buf13, buf16, buf18, buf21, buf24, buf27, buf35, )
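# Added note (not generated by Inductor): buf37, the first element of the
# returned tuple, is the classifier output with shape (4, 4, 64, 64); the
# remaining entries are inputs and intermediate buffers that appear to be
# saved for the corresponding backward graph.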
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 2048, 64, 64), (8388608, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class fpn_module(nn.Module):
def __init__(self, numClass):
super(fpn_module, self).__init__()
        self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
        self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
        self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), mode='bilinear')
p4 = F.interpolate(p4, size=(H, W), mode='bilinear')
p3 = F.interpolate(p3, size=(H, W), mode='bilinear')
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear') + y
def forward(self, c2, c3, c4, c5):
p5 = self.toplayer(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p3 = self._upsample_add(p4, self.latlayer2(c3))
p2 = self._upsample_add(p3, self.latlayer3(c2))
p5 = self.smooth1_2(self.smooth1_1(p5))
p4 = self.smooth2_2(self.smooth2_1(p4))
p3 = self.smooth3_2(self.smooth3_1(p3))
p2 = self.smooth4_2(self.smooth4_1(p2))
output = self.classify(self._concatenate(p5, p4, p3, p2))
return output
def get_inputs():
return [torch.rand([4, 256, 64, 64]), torch.rand([4, 512, 64, 64]),
torch.rand([4, 1024, 64, 64]), torch.rand([4, 2048, 64, 64])]
def get_init_inputs():
return [[], {'numClass': 4}]
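# Hedged usage sketch (added; not part of the original repository): instantiate
# fpn_module with numClass=4 (matching get_init_inputs()) and run one forward
# pass on the tensors from get_inputs(). With 64x64 inputs at 256/512/1024/2048
# channels, the classifier output is expected to have shape (4, numClass, 64, 64).
if __name__ == "__main__":
    model = fpn_module(numClass=4)
    c2, c3, c4, c5 = get_inputs()
    with torch.no_grad():
        out = model(c2, c3, c4, c5)
    print(out.shape)  # expected: torch.Size([4, 4, 64, 64])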
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 63, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = triton_helpers.minimum(tmp12, tmp4)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x6 = xindex
x3 = xindex // 4096 % 256
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + x6, None)
tmp33 = tl.load(in_ptr7 + x3, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x2), None,
eviction_policy='evict_last')
tmp11 = tmp10 + tmp1
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr2 + (tmp13 + 64 * tmp4 + 4096 * x2), None,
eviction_policy='evict_last')
tmp15 = tmp14 - tmp9
tmp17 = tmp15 * tmp16
tmp18 = tmp9 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = tmp19 < 0
tmp22 = tl.where(tmp21, tmp20, tmp19)
tmp23 = tl.load(in_ptr2 + (tmp8 + 64 * tmp22 + 4096 * x2), None,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (tmp13 + 64 * tmp22 + 4096 * x2), None,
eviction_policy='evict_last')
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp16
tmp27 = tmp23 + tmp26
tmp28 = tmp27 - tmp18
tmp30 = tmp28 * tmp29
tmp31 = tmp18 + tmp30
tmp34 = tmp32 + tmp33
tmp35 = tmp31 + tmp34
tl.store(in_out_ptr0 + x6, tmp35, None)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 128
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr11 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tl.load(in_ptr8 + (tmp8 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr8 + (tmp15 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp37
tmp41 = tmp40 - tmp38
tmp42 = tmp41 * tmp19
tmp43 = tmp38 + tmp42
tmp44 = tl.load(in_ptr8 + (tmp8 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp45 = tmp44 + tmp37
tmp46 = tl.load(in_ptr8 + (tmp15 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp47 = tmp46 + tmp37
tmp48 = tmp47 - tmp45
tmp49 = tmp48 * tmp19
tmp50 = tmp45 + tmp49
tmp51 = tmp50 - tmp43
tmp52 = tmp51 * tmp34
tmp53 = tl.load(in_ptr10 + (tmp8 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp55 = tmp53 + tmp54
tmp56 = tl.load(in_ptr10 + (tmp15 + 64 * tmp4 + 4096 * x5), None,
eviction_policy='evict_last')
tmp57 = tmp56 + tmp54
tmp58 = tmp57 - tmp55
tmp59 = tmp58 * tmp19
tmp60 = tmp55 + tmp59
tmp61 = tl.load(in_ptr10 + (tmp8 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp62 = tmp61 + tmp54
tmp63 = tl.load(in_ptr10 + (tmp15 + 64 * tmp25 + 4096 * x5), None,
eviction_policy='evict_last')
tmp64 = tmp63 + tmp54
tmp65 = tmp64 - tmp62
tmp66 = tmp65 * tmp19
tmp67 = tmp62 + tmp66
tmp68 = tmp67 - tmp60
tmp69 = tmp68 * tmp34
tl.store(out_ptr0 + x6, tmp21, None)
tl.store(out_ptr1 + x6, tmp35, None)
tl.store(out_ptr2 + x6, tmp43, None)
tl.store(out_ptr3 + x6, tmp52, None)
tl.store(out_ptr4 + x6, tmp60, None)
tl.store(out_ptr5 + x6, tmp69, None)
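# Editor's note (added comment, interpretation): this kernel precomputes bilinear
# interpolation terms for three separate 128-channel feature maps (in_ptr2, in_ptr8,
# in_ptr10, each with its conv bias in in_ptr3/in_ptr9/in_ptr11). For every output
# pixel it writes a horizontally interpolated base value (out_ptr0/2/4) and the
# weighted vertical difference (out_ptr1/3/5); the concatenation kernel below adds
# each pair to complete the interpolation before the channels are concatenated.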
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 512
x0 = xindex % 4096
x2 = xindex // 2097152
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 524288 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 524288 * x2), tmp4, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 256, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-128 + x1) + 524288 * x2),
tmp13, other=0.0)
tmp15 = tl.load(in_ptr3 + (x0 + 4096 * (-128 + x1) + 524288 * x2),
tmp13, other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 384, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-256 + x1) + 524288 * x2),
tmp22, other=0.0)
tmp24 = tl.load(in_ptr5 + (x0 + 4096 * (-256 + x1) + 524288 * x2),
tmp22, other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 512, tl.int64)
tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-384 + x1) + 524288 * x2),
tmp28, other=0.0)
tmp32 = tl.load(in_ptr7 + (-384 + x1), tmp28, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tl.store(out_ptr0 + x3, tmp38, None)
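# Editor's note (added comment, interpretation): this kernel implements the channel
# concatenation of four 128-channel maps into one 512-channel tensor, i.e. the
# `torch.cat([p5, p4, p3, p2], dim=1)` in `_concatenate` below. The first three
# 128-channel slices arrive as (base, weighted-delta) pairs from the previous kernel
# and are summed here; the last slice is the raw conv output (in_ptr6) plus its
# per-channel bias (in_ptr7), which had not been added yet.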
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
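# Editor's note (added comment): a simple in-place bias add over the 4 output
# channels of the final `classify` convolution, standing in for the bias term that
# extern_kernels.convolution was called without.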
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (256, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 2048, 64, 64), (8388608, 4096, 64, 1))
assert_size_stride(primals_4, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_7, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_10, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_13, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_14, (256,), (1,))
assert_size_stride(primals_15, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_16, (128,), (1,))
assert_size_stride(primals_17, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_18, (256,), (1,))
assert_size_stride(primals_19, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (128,), (1,))
assert_size_stride(primals_21, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_22, (256,), (1,))
assert_size_stride(primals_23, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_24, (128,), (1,))
assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_26, (256,), (1,))
assert_size_stride(primals_27, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (128,), (1,))
assert_size_stride(primals_29, (4, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(4194304)](buf1, primals_2,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_1[grid(64)](buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_2[grid(64)](buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_1[grid(64)](buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_2[grid(64)](buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf7,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf9,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = buf2
del buf2
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304)
](buf10, buf3, buf5, buf1, buf6, buf7, buf4, buf9, primals_5,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf11 = extern_kernels.convolution(primals_9, primals_7, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf13 = buf11
del buf11
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304)
](buf13, buf3, buf5, buf10, buf6, buf7, buf4, buf9, primals_8,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_8
buf14 = extern_kernels.convolution(primals_12, primals_10, stride=(
1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf16 = buf14
del buf14
triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304)
](buf16, buf3, buf5, buf13, buf6, buf7, buf4, buf9, primals_11,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf1, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_0[grid(4194304)](buf18, primals_14,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_14
buf19 = extern_kernels.convolution(buf18, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf20 = extern_kernels.convolution(buf10, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_0[grid(4194304)](buf21, primals_18,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_18
buf22 = extern_kernels.convolution(buf21, primals_19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf23 = extern_kernels.convolution(buf13, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_0[grid(4194304)](buf24, primals_22,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_22
buf25 = extern_kernels.convolution(buf24, primals_23, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf26 = extern_kernels.convolution(buf16, primals_25, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_0[grid(4194304)](buf27, primals_26,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_26
buf28 = extern_kernels.convolution(buf27, primals_27, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf29 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf30 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf31 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf32 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf33 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf34 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_sub_5[grid(2097152)
](buf3, buf5, buf19, primals_16, buf6, buf7, buf4, buf9, buf22,
primals_20, buf25, primals_24, buf29, buf30, buf31, buf32,
buf33, buf34, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del buf19
del buf22
del buf25
del primals_16
del primals_20
del primals_24
buf35 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_6[grid(8388608)](buf29, buf30, buf31, buf32,
buf33, buf34, buf28, primals_28, buf35, 8388608, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf28
del buf29
del buf30
del buf31
del buf32
del buf33
del buf34
del primals_28
buf36 = extern_kernels.convolution(buf35, primals_29, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_7[grid(65536)](buf37, primals_30,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_30
return (buf37, primals_1, primals_3, primals_4, primals_6, primals_7,
primals_9, primals_10, primals_12, primals_13, primals_15,
primals_17, primals_19, primals_21, primals_23, primals_25,
primals_27, primals_29, buf1, buf3, buf4, buf5, buf6, buf7, buf9,
buf10, buf13, buf16, buf18, buf21, buf24, buf27, buf35)
class fpn_moduleNew(nn.Module):
def __init__(self, numClass):
super(fpn_moduleNew, self).__init__()
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0
)
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1,
padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0
)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0
)
self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=
1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), mode='bilinear')
p4 = F.interpolate(p4, size=(H, W), mode='bilinear')
p3 = F.interpolate(p3, size=(H, W), mode='bilinear')
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note that in PyTorch, when the input size is odd, the feature map upsampled
with `F.upsample(..., scale_factor=2, mode='nearest')`
may not match the size of the lateral feature map.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsampling, which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear') + y
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.toplayer.weight
primals_2 = self.toplayer.bias
primals_13 = self.smooth1_1.weight
primals_5 = self.smooth1_1.bias
primals_17 = self.smooth2_1.weight
primals_8 = self.smooth2_1.bias
primals_21 = self.smooth3_1.weight
primals_11 = self.smooth3_1.bias
primals_25 = self.smooth4_1.weight
primals_14 = self.smooth4_1.bias
primals_15 = self.smooth1_2.weight
primals_16 = self.smooth1_2.bias
primals_19 = self.smooth2_2.weight
primals_20 = self.smooth2_2.bias
primals_23 = self.smooth3_2.weight
primals_24 = self.smooth3_2.bias
primals_27 = self.smooth4_2.weight
primals_28 = self.smooth4_2.bias
primals_4 = self.latlayer1.weight
primals_18 = self.latlayer1.bias
primals_7 = self.latlayer2.weight
primals_22 = self.latlayer2.bias
primals_10 = self.latlayer3.weight
primals_26 = self.latlayer3.bias
primals_29 = self.classify.weight
primals_30 = self.classify.bias
primals_12 = input_0
primals_9 = input_1
primals_6 = input_2
primals_3 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0]
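# Editor's addition: a minimal, hypothetical usage sketch for the compiled module
# above. It is not part of the original repository; the shapes simply mirror the
# assert_size_stride checks in `call` (batch 4, feature maps at 64x64 with
# 256/512/1024/2048 channels, numClass=4), and a CUDA device is required because
# `call` pins all buffers to cuda:0.
def _example_fpn_module_usage():
    import torch
    model = fpn_moduleNew(numClass=4).cuda()
    c2 = torch.rand(4, 256, 64, 64, device='cuda')
    c3 = torch.rand(4, 512, 64, 64, device='cuda')
    c4 = torch.rand(4, 1024, 64, 64, device='cuda')
    c5 = torch.rand(4, 2048, 64, 64, device='cuda')
    out = model(c2, c3, c4, c5)
    return out  # expected shape: (4, 4, 64, 64)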
| ShenZheng2000/Syn2Real-Pytorch | fpn_module | false | 6,012 | [
"MIT"
] | 1 | 214c800914e2bcd57d4ca74a4c8476a11e1b5905 | https://github.com/ShenZheng2000/Syn2Real-Pytorch/tree/214c800914e2bcd57d4ca74a4c8476a11e1b5905 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, numClass):
super().__init__()
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0
)
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1
)
self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1
)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1,
padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0
)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0
)
self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=
1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), mode='bilinear')
p4 = F.interpolate(p4, size=(H, W), mode='bilinear')
p3 = F.interpolate(p3, size=(H, W), mode='bilinear')
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note that in PyTorch, when the input size is odd, the feature map upsampled
with `F.upsample(..., scale_factor=2, mode='nearest')`
may not match the size of the lateral feature map.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsampling, which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear') + y
def forward(self, c2, c3, c4, c5):
p5 = self.toplayer(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p3 = self._upsample_add(p4, self.latlayer2(c3))
p2 = self._upsample_add(p3, self.latlayer3(c2))
p5 = self.smooth1_2(self.smooth1_1(p5))
p4 = self.smooth2_2(self.smooth2_1(p4))
p3 = self.smooth3_2(self.smooth3_1(p3))
p2 = self.smooth4_2(self.smooth4_1(p2))
output = self.classify(self._concatenate(p5, p4, p3, p2))
return output
def get_inputs():
return [torch.rand([4, 256, 64, 64]), torch.rand([4, 512, 64, 64]),
torch.rand([4, 1024, 64, 64]), torch.rand([4, 2048, 64, 64])]
def get_init_inputs():
return [4]
|
RWKV_TimeMix | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rn/crnf7npolybp3ziafhkvky7ug44jdqf6s6i7qo4iucr5nuinu5z6.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd, %slice_6], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 4)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tmp9 & tmp4
tmp11 = tl.load(in_ptr0 + ((-4) + (4*x3) + x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tmp0 < tmp7
tmp16 = tl.load(in_ptr0 + (2 + (4*x3) + ((-2) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp13, tmp16)
tl.store(out_ptr0 + (x4), tmp17, xmask)
''', device_str='cuda')
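# Editor's note (added comment, interpretation): the kernel above builds the
# time-shifted input used by RWKV_TimeMix.forward, i.e.
# torch.cat([ZeroPad2d((0, 0, 1, -1))(x[:, :, :C // 2]), x[:, :, C // 2:]], dim=-1):
# for the first C//2 channels it reads the previous time step (zero at t == 0),
# while the remaining channels copy the current time step unchanged.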
# kernel path: runs/run_shard_4/inductor_cache/56/c56nwk7hgznquy6much5p6sbe3qi5nnlwqr672ulf3vo5x6ery3w.py
# Topologically Sorted Source Nodes: [k_1, k_2, sum_k], Original ATen: [aten.clamp, aten.exp, aten.cumsum]
# Source node to ATen node mapping:
# k_1 => clamp_max, clamp_min
# k_2 => exp
# sum_k => cumsum
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, -60), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 30), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%clamp_max,), kwargs = {})
# %cumsum : [num_users=2] = call_function[target=torch.ops.aten.cumsum.default](args = (%exp, 1), kwargs = {})
triton_per_fused_clamp_cumsum_exp_1 = async_compile.triton('triton_per_fused_clamp_cumsum_exp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[16, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_cumsum_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_cumsum_exp_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (16*x1)), xmask, other=0.0)
tmp1 = -60.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 30.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5.to(tl.float32)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp8, = tl.associative_scan((tmp7,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + (4*r2) + (16*x1)), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pq/cpqqmshlmrby6zt4x7emop4a3kfrsamwiurdxo77svikvrhnjgag.py
# Topologically Sorted Source Nodes: [k_1, k_2, mul], Original ATen: [aten.clamp, aten.exp, aten.mul]
# Source node to ATen node mapping:
# k_1 => clamp_max, clamp_min
# k_2 => exp
# mul => mul
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, -60), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 30), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%clamp_max,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %view_3), kwargs = {})
triton_poi_fused_clamp_exp_mul_2 = async_compile.triton('triton_poi_fused_clamp_exp_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_exp_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_exp_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp6 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = -60.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 30.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pp/cppsgdtib3dubobs5rvoaakazprv4rwvkabycpxcayy52hlducuc.py
# Topologically Sorted Source Nodes: [sigmoid, mul_1, rwkv], Original ATen: [aten.sigmoid, aten.mul, aten.div]
# Source node to ATen node mapping:
# mul_1 => mul_1
# rwkv => div
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_11), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %cumsum), kwargs = {})
triton_poi_fused_div_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_div_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex
x1 = xindex % 4
x2 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x3 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (y0 + (4*x2) + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x3 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 / tmp4
tl.store(out_ptr0 + (x3 + (16*y0)), tmp5, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jr/cjrrz2amaxtkyz4hez7bs4nz5abg5nehrbfu44oaefbvzwxshwhm.py
# Topologically Sorted Source Nodes: [mul_2], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_2 => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, %primals_11), kwargs = {})
triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_1, k_2, sum_k], Original ATen: [aten.clamp, aten.exp, aten.cumsum]
triton_per_fused_clamp_cumsum_exp_1.run(buf1, buf4, 16, 4, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_1, k_2, mul], Original ATen: [aten.clamp, aten.exp, aten.mul]
triton_poi_fused_clamp_exp_mul_2.run(buf1, buf2, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_8, reinterpret_tensor(buf5, (4, 4, 4), (1, 4, 16), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul_1, rwkv], Original ATen: [aten.sigmoid, aten.mul, aten.div]
triton_poi_fused_div_mul_sigmoid_3.run(buf3, buf6, buf4, buf7, 4, 16, grid=grid(4, 16), stream=stream0)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rwkv_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_10
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2], Original ATen: [aten.mul]
triton_poi_fused_mul_4.run(buf8, primals_11, buf9, 64, grid=grid(64), stream=stream0)
return (buf9, primals_11, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, buf2, buf3, buf4, buf6, buf7, buf8, primals_9, reinterpret_tensor(primals_8, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 4), (1, 16, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class RWKV_TimeMix(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
assert config.n_attn % config.n_head == 0
self.layer_id = layer_id
self.ctx_len = config.ctx_len
self.n_head = config.n_head
self.head_size = config.n_attn // config.n_head
self.time_ww = nn.Parameter(torch.ones(config.n_head, config.
ctx_len, config.ctx_len))
self.time_gamma = nn.Parameter(torch.ones(config.ctx_len, 1))
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.key = nn.Linear(config.n_embd, config.n_attn)
self.value = nn.Linear(config.n_embd, config.n_attn)
self.receptance = nn.Linear(config.n_embd, config.n_attn)
self.output = nn.Linear(config.n_attn, config.n_embd)
self.key.scale_init = 0
self.receptance.scale_init = 0
self.output.scale_init = 0
def forward(self, x):
B, T, C = x.size()
x = torch.cat([self.time_shift(x[:, :, :C // 2]), x[:, :, C // 2:]],
dim=-1)
k = self.key(x)
v = self.value(x)
r = self.receptance(x)
k = torch.clamp(k, max=30, min=-60)
k = torch.exp(k)
sum_k = torch.cumsum(k, dim=1)
kv = (k * v).view(B, T, self.n_head, self.head_size)
wkv = torch.einsum('htu,buhc->bthc', self.time_ww[:, :T, :T], kv
).contiguous().view(B, T, -1)
rwkv = torch.sigmoid(r) * wkv / sum_k
rwkv = self.output(rwkv)
return rwkv * self.time_gamma[:T, :]
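# Editor's note (added): in words, the forward above computes, per head h and
# time step t,
#     wkv[b, t, h, :] = sum_u time_ww[h, t, u] * (exp(clamp(k)) * v)[b, u, h, :]
# and returns
#     output(sigmoid(r) * wkv.view(B, T, -1) / cumsum_t(exp(clamp(k)))) * time_gamma[:T]
# A hypothetical smoke test under the same mocked config used by get_init_inputs:
#     m = RWKV_TimeMix(_mock_config(n_attn=4, n_head=4, ctx_len=4, n_embd=4), 1)
#     y = m(torch.rand(4, 4, 4))   # -> shape (4, 4, 4)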
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(n_attn=4, n_head=4, ctx_len=4,
n_embd=4), 'layer_id': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tmp9 & tmp4
tmp11 = tl.load(in_ptr0 + (-4 + 4 * x3 + x0), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp16 = tl.load(in_ptr0 + (2 + 4 * x3 + (-2 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp13, tmp16)
tl.store(out_ptr0 + x4, tmp17, xmask)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_clamp_cumsum_exp_1(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 16 * x1), xmask, other=0.0)
tmp1 = -60.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 30.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5.to(tl.float32)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp8, = tl.associative_scan((tmp7,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 4 * r2 + 16 * x1), tmp8, xmask)
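# Editor's note (added comment, interpretation): this persistent-reduction kernel
# fuses clamp(k, -60, 30), exp and a cumulative sum over the time dimension via
# tl.associative_scan with an add combiner, i.e. it is the Triton counterpart of
# sum_k = torch.cumsum(torch.exp(torch.clamp(k, max=30, min=-60)), dim=1).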
@triton.jit
def triton_poi_fused_clamp_exp_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -60.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 30.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_div_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex
x1 = xindex % 4
x2 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x3 + 16 * y0), xmask & ymask, eviction_policy
='evict_last')
tmp2 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x3 + 16 * y0), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 / tmp4
tl.store(out_ptr0 + (x3 + 16 * y0), tmp5, xmask & ymask)
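# Editor's note (added comment, interpretation): given r (in_ptr0), the bmm/einsum
# result wkv (in_ptr1, loaded through a transposed layout) and the cumulative sum
# sum_k (in_ptr2), this kernel evaluates rwkv = torch.sigmoid(r) * wkv / sum_k
# elementwise.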
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_clamp_cumsum_exp_1[grid(16)](buf1, buf4, 16, 4,
XBLOCK=8, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_exp_mul_2[grid(64)](buf1, buf2, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_8, reinterpret_tensor(buf5, (4, 4, 4), (
1, 4, 16), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_mul_sigmoid_3[grid(4, 16)](buf3, buf6, buf4,
buf7, 4, 16, XBLOCK=16, YBLOCK=2, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_10
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_4[grid(64)](buf8, primals_11, buf9, 64, XBLOCK
=64, num_warps=1, num_stages=1)
return buf9, primals_11, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), buf1, buf2, buf3, buf4, buf6, buf7, buf8, primals_9, reinterpret_tensor(
primals_8, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4,
4, 4), (1, 16, 4), 0)
class RWKV_TimeMixNew(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
assert config.n_attn % config.n_head == 0
self.layer_id = layer_id
self.ctx_len = config.ctx_len
self.n_head = config.n_head
self.head_size = config.n_attn // config.n_head
self.time_ww = nn.Parameter(torch.ones(config.n_head, config.
ctx_len, config.ctx_len))
self.time_gamma = nn.Parameter(torch.ones(config.ctx_len, 1))
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.key = nn.Linear(config.n_embd, config.n_attn)
self.value = nn.Linear(config.n_embd, config.n_attn)
self.receptance = nn.Linear(config.n_embd, config.n_attn)
self.output = nn.Linear(config.n_attn, config.n_embd)
self.key.scale_init = 0
self.receptance.scale_init = 0
self.output.scale_init = 0
def forward(self, input_0):
primals_1 = self.time_ww
primals_11 = self.time_gamma
primals_2 = self.key.weight
primals_3 = self.key.bias
primals_4 = self.value.weight
primals_5 = self.value.bias
primals_6 = self.receptance.weight
primals_7 = self.receptance.bias
primals_9 = self.output.weight
primals_10 = self.output.bias
primals_8 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| YUASDS/AI-Writer | RWKV_TimeMix | false | 6,013 | [
"BSD-3-Clause"
] | 1 | 6ec1e9548802ed5b5a2f1fd297595a52cb605266 | https://github.com/YUASDS/AI-Writer/tree/6ec1e9548802ed5b5a2f1fd297595a52cb605266 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
assert config.n_attn % config.n_head == 0
self.layer_id = layer_id
self.ctx_len = config.ctx_len
self.n_head = config.n_head
self.head_size = config.n_attn // config.n_head
self.time_ww = nn.Parameter(torch.ones(config.n_head, config.
ctx_len, config.ctx_len))
self.time_gamma = nn.Parameter(torch.ones(config.ctx_len, 1))
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.key = nn.Linear(config.n_embd, config.n_attn)
self.value = nn.Linear(config.n_embd, config.n_attn)
self.receptance = nn.Linear(config.n_embd, config.n_attn)
self.output = nn.Linear(config.n_attn, config.n_embd)
self.key.scale_init = 0
self.receptance.scale_init = 0
self.output.scale_init = 0
def forward(self, x):
B, T, C = x.size()
x = torch.cat([self.time_shift(x[:, :, :C // 2]), x[:, :, C // 2:]],
dim=-1)
k = self.key(x)
v = self.value(x)
r = self.receptance(x)
k = torch.clamp(k, max=30, min=-60)
k = torch.exp(k)
sum_k = torch.cumsum(k, dim=1)
kv = (k * v).view(B, T, self.n_head, self.head_size)
wkv = torch.einsum('htu,buhc->bthc', self.time_ww[:, :T, :T], kv
).contiguous().view(B, T, -1)
rwkv = torch.sigmoid(r) * wkv / sum_k
rwkv = self.output(rwkv)
return rwkv * self.time_gamma[:T, :]
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(n_attn=4, n_head=4, ctx_len=4,
n_embd=4), 'layer_id': 1}]
|
LearnablePositionalEncoding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cm/ccm7y3tyrccqsaknetzzscd6vebg5vwn32zhm3o3kao5h5m3e4de.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %slice_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1024, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1024, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LearnablePositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=1024):
super(LearnablePositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.uniform_(self.pe, -0.02, 0.02)
def forward(self, x):
"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
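# Added usage sketch (illustration only, not part of the original record). It
# exercises LearnablePositionalEncoding with the [seq_len, batch, embed_dim]
# layout described in the forward() docstring; the sizes below are arbitrary.
if __name__ == "__main__":
    _pe = LearnablePositionalEncoding(d_model=16, max_len=128)
    _x = torch.rand(32, 8, 16)
    _y = _pe(_x)
    assert _y.shape == _x.shape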
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
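# Added note on the kernel below: it fuses the positional-encoding addition
# x + pe into a single elementwise pass. x0 picks the embedding dimension and
# x2 the axis broadcast against pe's sequence axis, so each output element
# reads pe at offset x0 + 4 * x2 while x itself is read contiguously.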
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1024, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnablePositionalEncodingNew(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=1024):
super(LearnablePositionalEncodingNew, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.uniform_(self.pe, -0.02, 0.02)
def forward(self, input_0):
primals_1 = self.pe
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| YexuZhou/TimeSeriesClassification_Transformer | LearnablePositionalEncoding | false | 6,014 | [
"MIT"
] | 1 | c20e00cfac4cfdb849e57e14c184f7d424257409 | https://github.com/YexuZhou/TimeSeriesClassification_Transformer/tree/c20e00cfac4cfdb849e57e14c184f7d424257409 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=1024):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.uniform_(self.pe, -0.02, 0.02)
def forward(self, x):
"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
DW_PW_projection | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fd/cfdsvvyxn5snuqx5p5jadczfikvmdyxg3ui4a2q4btz7cwzkubem.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad1d]
# Source node to ATen node mapping:
# pad => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_2, [None, %clamp_max]), kwargs = {})
triton_poi_fused_replication_pad1d_0 = async_compile.triton('triton_poi_fused_replication_pad1d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x1) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ja/cjahugofdtd6lvhqmcreqjoasd43lef5xpvagx4uqrwyoeozs7e6.py
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad1d]
# Source node to ATen node mapping:
# pad_1 => _unsafe_index_1
# Graph fragment:
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%squeeze, [None, %clamp_max_1]), kwargs = {})
triton_poi_fused_replication_pad1d_1 = async_compile.triton('triton_poi_fused_replication_pad1d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_replication_pad1d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((5*x1) + ((4) * ((4) <= (x0)) + (x0) * ((x0) < (4)))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad1d]
stream0 = get_raw_stream(0)
triton_poi_fused_replication_pad1d_0.run(primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 8), (0, 8, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf1, (1, 4, 5), (20, 5, 1))
buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad1d]
triton_poi_fused_replication_pad1d_1.run(buf1, buf2, 20, grid=grid(20), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 5), (0, 5, 1), 0), primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 5), (20, 5, 1))
return (reinterpret_tensor(buf3, (4, 5), (5, 1), 0), primals_1, primals_3, reinterpret_tensor(buf0, (1, 4, 8), (32, 8, 1), 0), reinterpret_tensor(buf1, (4, 5), (5, 1), 0), reinterpret_tensor(buf2, (1, 4, 5), (20, 5, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DW_PW_projection(nn.Module):
def __init__(self, c_in, c_out, kernel_size, bias=False, padding_mode=
'replicate'):
super(DW_PW_projection, self).__init__()
self.dw_conv1d = nn.Conv1d(in_channels=c_in, out_channels=c_in,
kernel_size=kernel_size, padding=int(kernel_size / 2), groups=
c_in, bias=bias, padding_mode=padding_mode)
self.pw_conv1d = nn.Conv1d(in_channels=c_in, out_channels=c_out,
kernel_size=1, padding=0, groups=1, bias=bias, padding_mode=
padding_mode)
def forward(self, x):
x = self.dw_conv1d(x)
x = self.pw_conv1d(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'c_out': 4, 'kernel_size': 4}]
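# Added usage sketch (illustration only, not part of the original record): a
# depthwise conv followed by a pointwise (1x1) conv, i.e. a depthwise-separable
# projection. With an odd kernel_size the padding of kernel_size // 2 preserves
# the sequence length; the sizes below are arbitrary.
if __name__ == "__main__":
    _proj = DW_PW_projection(c_in=4, c_out=8, kernel_size=3)
    _x = torch.rand(2, 4, 16)
    _y = _proj(_x)
    assert _y.shape == (2, 8, 16)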
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
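# Added note on the two kernels below: they materialize Conv1d's 'replicate'
# padding_mode by hand. Kernel 0 pads the length-4 signal to length 8 (two
# replicated samples on each side, source index clamped to [0, 3]) before the
# depthwise conv; kernel 1 is the pointwise conv's replicate pad with
# padding=0, which degenerates to a plain copy of the length-5 intermediate.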
@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_replication_pad1d_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (5 * x1 + (4 * (4 <= x0) + x0 * (x0 < 4))), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad1d_0[grid(32)](primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 8
), (0, 8, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=4,
bias=None)
assert_size_stride(buf1, (1, 4, 5), (20, 5, 1))
buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
triton_poi_fused_replication_pad1d_1[grid(20)](buf1, buf2, 20,
XBLOCK=32, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 5
), (0, 5, 1), 0), primals_3, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf3, (1, 4, 5), (20, 5, 1))
return reinterpret_tensor(buf3, (4, 5), (5, 1), 0
), primals_1, primals_3, reinterpret_tensor(buf0, (1, 4, 8), (32, 8,
1), 0), reinterpret_tensor(buf1, (4, 5), (5, 1), 0
), reinterpret_tensor(buf2, (1, 4, 5), (20, 5, 1), 0)
class DW_PW_projectionNew(nn.Module):
def __init__(self, c_in, c_out, kernel_size, bias=False, padding_mode=
'replicate'):
super(DW_PW_projectionNew, self).__init__()
self.dw_conv1d = nn.Conv1d(in_channels=c_in, out_channels=c_in,
kernel_size=kernel_size, padding=int(kernel_size / 2), groups=
c_in, bias=bias, padding_mode=padding_mode)
self.pw_conv1d = nn.Conv1d(in_channels=c_in, out_channels=c_out,
kernel_size=1, padding=0, groups=1, bias=bias, padding_mode=
padding_mode)
def forward(self, input_0):
primals_1 = self.dw_conv1d.weight
primals_3 = self.pw_conv1d.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| YexuZhou/TimeSeriesClassification_Transformer | DW_PW_projection | false | 6,015 | [
"MIT"
] | 1 | c20e00cfac4cfdb849e57e14c184f7d424257409 | https://github.com/YexuZhou/TimeSeriesClassification_Transformer/tree/c20e00cfac4cfdb849e57e14c184f7d424257409 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, c_in, c_out, kernel_size, bias=False, padding_mode=
'replicate'):
super().__init__()
self.dw_conv1d = nn.Conv1d(in_channels=c_in, out_channels=c_in,
kernel_size=kernel_size, padding=int(kernel_size / 2), groups=
c_in, bias=bias, padding_mode=padding_mode)
self.pw_conv1d = nn.Conv1d(in_channels=c_in, out_channels=c_out,
kernel_size=1, padding=0, groups=1, bias=bias, padding_mode=
padding_mode)
def forward(self, x):
x = self.dw_conv1d(x)
x = self.pw_conv1d(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ChannelSELayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# squeeze_tensor => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/eb/cebpbupczy3a7z6yffgxybumq5trdt3jp5hxwuoo6w6cunzz7d7h.py
# Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# fc_out_1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nv/cnvpklwd233bi4zltxm6adn2vglwhs7tyu5gqqi7widiugltrkb2.py
# Topologically Sorted Source Nodes: [output_tensor], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# output_tensor => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 3.0
tmp3 = tmp1 + tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 6.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tmp1 * tmp7
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tmp11 = tmp0 * tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_3, 8, grid=grid(8), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_tensor], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, primals_1, buf1, buf3, buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
class ChannelSELayer(nn.Module):
"""
Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py
Re-implementation of Squeeze-and-Excitation (SE) block described in:
*Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
MIT License
Copyright (c) 2018 Abhijit Guha Roy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1
):
"""
        :param num_channels: Number of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
"""
super(ChannelSELayer, self).__init__()
if dim_out is not None:
assert dim_out == num_channels, (dim_out, num_channels,
'only same dimensionality is supported')
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.stride = stride
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Hardswish()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output tensor
"""
batch_size, num_channels, _H, _W = input_tensor.size()
squeeze_tensor = input_tensor.reshape(batch_size, num_channels, -1
).mean(dim=2)
fc_out_1 = self.relu(self.fc1(squeeze_tensor))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
a, b = squeeze_tensor.size()
output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
if self.stride > 1:
output_tensor = output_tensor[:, :, ::self.stride, ::self.stride]
return output_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
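# Added usage sketch (illustration only, not part of the original record):
# squeeze-and-excitation reweights channels but keeps the tensor shape when
# stride == 1 (the default). The sizes below are arbitrary.
if __name__ == "__main__":
    _se = ChannelSELayer(num_channels=8, reduction_ratio=2)
    _x = torch.rand(2, 8, 16, 16)
    _y = _se(_x)
    assert _y.shape == _x.shape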
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
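# Added note on the kernels below: triton_per_fused_mean_0 is the "squeeze"
# step (per-channel mean over the 16 spatial positions), triton_poi_fused_relu_1
# fuses the first FC's bias add with ReLU, and triton_poi_fused_mul_2 applies
# the "excitation" gate to the input tensor.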
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
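# Added note: the kernel below fuses nn.Hardswish with the channel-wise multiply.
# With s the fc2 output for a channel, it computes x * (s * clamp(s + 3, 0, 6) / 6);
# the constant 0.16666666666666666 is 1/6.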
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = 3.0
tmp3 = tmp1 + tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 6.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tmp1 * tmp7
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tmp11 = tmp0 * tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1, 4
), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
(2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_1, buf1, buf3, buf4, primals_4
class ChannelSELayerNew(nn.Module):
"""
Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py
Re-implementation of Squeeze-and-Excitation (SE) block described in:
*Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
MIT License
Copyright (c) 2018 Abhijit Guha Roy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1
):
"""
        :param num_channels: Number of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
"""
super(ChannelSELayerNew, self).__init__()
if dim_out is not None:
assert dim_out == num_channels, (dim_out, num_channels,
'only same dimensionality is supported')
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.stride = stride
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Hardswish()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| XuelianCheng/ppuda | ChannelSELayer | false | 6,016 | [
"MIT"
] | 1 | d5b89928e430e2d5b976f84b1ea66b4b901e6cda | https://github.com/XuelianCheng/ppuda/tree/d5b89928e430e2d5b976f84b1ea66b4b901e6cda | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
class Model(nn.Module):
"""
Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py
Re-implementation of Squeeze-and-Excitation (SE) block described in:
*Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
MIT License
Copyright (c) 2018 Abhijit Guha Roy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1
):
"""
:param num_channels: No of input channels
:param reduction_ratio: By how much should the num_channels should be reduced
"""
super().__init__()
if dim_out is not None:
assert dim_out == num_channels, (dim_out, num_channels,
'only same dimensionality is supported')
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.stride = stride
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Hardswish()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output tensor
"""
batch_size, num_channels, _H, _W = input_tensor.size()
squeeze_tensor = input_tensor.reshape(batch_size, num_channels, -1
).mean(dim=2)
fc_out_1 = self.relu(self.fc1(squeeze_tensor))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
a, b = squeeze_tensor.size()
output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
if self.stride > 1:
output_tensor = output_tensor[:, :, ::self.stride, ::self.stride]
return output_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
RWKV_ChannelMix | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rn/crnf7npolybp3ziafhkvky7ug44jdqf6s6i7qo4iucr5nuinu5z6.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%constant_pad_nd, %slice_6], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 4)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tmp9 & tmp4
tmp11 = tl.load(in_ptr0 + ((-4) + (4*x3) + x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tmp0 < tmp7
tmp16 = tl.load(in_ptr0 + (2 + (4*x3) + ((-2) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp13, tmp16)
tl.store(out_ptr0 + (x4), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2h/c2hegytq7hmgogj6drh6cdha4zg2zuv4vozh5vbekubt4zlpteyb.py
# Topologically Sorted Source Nodes: [mish, mul], Original ATen: [aten.mish, aten.mul]
# Source node to ATen node mapping:
# mish => exp, gt, log1p, mul, tanh, where
# mul => mul_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 20), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %log1p), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %tanh), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_3), kwargs = {})
triton_poi_fused_mish_mul_1 = async_compile.triton('triton_poi_fused_mish_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mish_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mish_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oj/cojhz6pvfpzarbxfv5iqqf34ppyloqixpicpqrh62t4lizd7jny2.py
# Topologically Sorted Source Nodes: [sigmoid, rwkv], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# rwkv => mul_2
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {})
triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (10, 4), (4, 1))
assert_size_stride(primals_3, (10, ), (1, ))
assert_size_stride(primals_4, (10, 4), (4, 1))
assert_size_stride(primals_5, (10, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 10), (10, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 10), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 10), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
buf4 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [mish, mul], Original ATen: [aten.mish, aten.mul]
triton_poi_fused_mish_mul_1.run(buf1, buf2, buf4, 160, grid=grid(160), stream=stream0)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wkv], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (16, 10), (10, 1), 0), reinterpret_tensor(primals_8, (10, 4), (1, 10), 0), alpha=1, beta=1, out=buf5)
del primals_9
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, rwkv], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_2.run(buf3, buf5, buf6, 64, grid=grid(64), stream=stream0)
return (buf6, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, buf2, buf3, reinterpret_tensor(buf4, (16, 10), (10, 1), 0), buf5, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 10), (10, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.nn import functional as F
class RWKV_ChannelMix(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
self.layer_id = layer_id
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
hidden_sz = 5 * config.n_ffn // 2
self.key = nn.Linear(config.n_embd, hidden_sz)
self.value = nn.Linear(config.n_embd, hidden_sz)
self.weight = nn.Linear(hidden_sz, config.n_embd)
self.receptance = nn.Linear(config.n_embd, config.n_embd)
self.receptance.scale_init = 0
self.weight.scale_init = 0
def forward(self, x):
_B, _T, C = x.size()
x = torch.cat([self.time_shift(x[:, :, :C // 2]), x[:, :, C // 2:]],
dim=-1)
k = self.key(x)
v = self.value(x)
r = self.receptance(x)
wkv = self.weight(F.mish(k) * v)
rwkv = torch.sigmoid(r) * wkv
return rwkv
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(n_ffn=4, n_embd=4), 'layer_id': 1}]
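# Added usage sketch (illustration only, not part of the original record): the
# channel-mix keeps the (batch, time, n_embd) shape; n_embd should be even
# because the first half of the channels is time-shifted. The config values
# below are arbitrary.
if __name__ == "__main__":
    _cfg = _mock_config(n_ffn=8, n_embd=8)
    _mix = RWKV_ChannelMix(_cfg, layer_id=0)
    _x = torch.rand(2, 6, 8)
    assert _mix(_x).shape == _x.shape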
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
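# Added note on the kernels below: triton_poi_fused_cat_0 builds the
# time-shifted concatenation -- the first C/2 channels are taken from the
# previous timestep (zero-filled at t = 0), the remaining channels pass through
# unchanged. The later kernels compute mish(k) * v and sigmoid(r) * wkv.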
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tmp9 & tmp4
tmp11 = tl.load(in_ptr0 + (-4 + 4 * x3 + x0), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp16 = tl.load(in_ptr0 + (2 + 4 * x3 + (-2 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp13, tmp16)
tl.store(out_ptr0 + x4, tmp17, xmask)
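# Added note: the kernel below evaluates mish(k) * v elementwise, with
# mish(k) = k * tanh(softplus(k)); softplus is computed as log1p(exp(k)) and
# replaced by k itself when k > 20 to avoid overflow.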
@triton.jit
def triton_poi_fused_mish_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (10, 4), (4, 1))
assert_size_stride(primals_3, (10,), (1,))
assert_size_stride(primals_4, (10, 4), (4, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 10), (10, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 10), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 10), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
buf4 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32)
triton_poi_fused_mish_mul_1[grid(160)](buf1, buf2, buf4, 160,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (16, 10),
(10, 1), 0), reinterpret_tensor(primals_8, (10, 4), (1, 10), 0),
alpha=1, beta=1, out=buf5)
del primals_9
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_2[grid(64)](buf3, buf5, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf6, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), buf1, buf2, buf3, reinterpret_tensor(buf4, (16, 10), (10, 1), 0
), buf5, primals_8
class RWKV_ChannelMixNew(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
self.layer_id = layer_id
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
hidden_sz = 5 * config.n_ffn // 2
self.key = nn.Linear(config.n_embd, hidden_sz)
self.value = nn.Linear(config.n_embd, hidden_sz)
self.weight = nn.Linear(hidden_sz, config.n_embd)
self.receptance = nn.Linear(config.n_embd, config.n_embd)
self.receptance.scale_init = 0
self.weight.scale_init = 0
def forward(self, input_0):
primals_2 = self.key.weight
primals_3 = self.key.bias
primals_4 = self.value.weight
primals_5 = self.value.bias
        primals_8 = self.weight.weight
        primals_9 = self.weight.bias
        primals_6 = self.receptance.weight
        primals_7 = self.receptance.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| YUASDS/AI-Writer | RWKV_ChannelMix | false | 6,017 | [
"BSD-3-Clause"
] | 1 | 6ec1e9548802ed5b5a2f1fd297595a52cb605266 | https://github.com/YUASDS/AI-Writer/tree/6ec1e9548802ed5b5a2f1fd297595a52cb605266 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
self.layer_id = layer_id
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
hidden_sz = 5 * config.n_ffn // 2
self.key = nn.Linear(config.n_embd, hidden_sz)
self.value = nn.Linear(config.n_embd, hidden_sz)
self.weight = nn.Linear(hidden_sz, config.n_embd)
self.receptance = nn.Linear(config.n_embd, config.n_embd)
self.receptance.scale_init = 0
self.weight.scale_init = 0
def forward(self, x):
_B, _T, C = x.size()
x = torch.cat([self.time_shift(x[:, :, :C // 2]), x[:, :, C // 2:]],
dim=-1)
k = self.key(x)
v = self.value(x)
r = self.receptance(x)
wkv = self.weight(F.mish(k) * v)
rwkv = torch.sigmoid(r) * wkv
return rwkv
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(n_ffn=4, n_embd=4), 'layer_id': 1}]
|
TransformerLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/52/c525by6g4qqvzpkamxv56vfyu6zlvbqfepthe4npppzrrv2boata.py
# Topologically Sorted Source Nodes: [means, x_zeromean], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# means => mean
# x_zeromean => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jb/cjbxtrhaay7rotsyysuftg23isdvjimy5xsuoa3afjgeuvhmcet3.py
# Topologically Sorted Source Nodes: [pow_1, variances, add, sqrt, x, mul, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# pow_1 => pow_1
# sqrt => sqrt
# variances => mean_1
# x => div
# x_1 => add_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uc/cucgrga44gtlwzw6ehoy4jrwzm5fghn3ljf7iugmdjhe6m7mjcas.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %primals_5, %primals_6],), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((-8) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7v/c7vmyyo5wxjwhczncwe26c5vvxpnuc4d63pyhv44adhxrqqhbkd6.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# multi_head_attention_forward => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_5, %repeat_1],), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hr/chreiskn3cjyo3eff2gsxuz7ampglelwlsrzs7kmnwjm4l5lzi5w.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_1
# Graph fragment:
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = x2 % 4
tmp2 = tl.full([1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (x0 % 4), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr1 + ((-4) + (x0 % 4)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tmp13 = tl.full([1], 12, tl.int64)
tmp14 = tmp1 < tmp13
tmp15 = tl.load(in_ptr2 + ((-8) + (x0 % 4)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(in_out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qq/cqqiwi6cphov3y4cw45pihso7kygfbxrztamsxi6ywv3sllhzvz5.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (5*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (5*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (5*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (5*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp1 - tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp8
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ti/ctik3dxqudjbuxk6lecrd6cxqp5ncnz6kele7crpn6ziyqip447n.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, div_1, exp, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 5)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/na/cnaxt66xj2zhaned4fgx5elukrpsiq623ib46puxb6rcdh4iismy.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yx/cyx6uhp26itou7morodlakbwephiwz7r7jx56snt2ukwrojhlmfp.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# multi_head_attention_forward => mean_2
# Graph fragment:
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_11, [1]), kwargs = {})
triton_poi_fused_mean_8 = async_compile.triton('triton_poi_fused_mean_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = (xindex // 20)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (80*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (20 + x0 + (80*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (40 + x0 + (80*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (60 + x0 + (80*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hu/chuhaqnv3eocytas2o5zkt2laav3cpr7gqx5s2m2tsctpi2kooe7.py
# Topologically Sorted Source Nodes: [x_3, means_1, x_zeromean_1, pow_2, variances_1], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# means_1 => mean_3
# pow_2 => pow_2
# variances_1 => mean_4
# x_3 => add_2
# x_zeromean_1 => sub_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_10), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mean_3), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [-1], True), kwargs = {})
triton_poi_fused_add_mean_pow_sub_9 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ps/cpsrkyasbkreyck4njrazduiuuaugpsanaxlzzqhnuxs7rd6xtwj.py
# Topologically Sorted Source Nodes: [x_3, means_1, x_zeromean_1, add_3, sqrt_1, x_4, mul_1, x_5], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_3 => add_3
# means_1 => mean_3
# mul_1 => mul_2
# sqrt_1 => sqrt_1
# x_3 => add_2
# x_4 => div_2
# x_5 => add_4
# x_zeromean_1 => sub_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_10), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mean_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_4, 1e-12), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_14, %div_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_15), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_10 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/in/cingjrjcsnplp4mnf7j3qaenyi3g6h7mejtd6hllewoqj33nanxc.py
# Topologically Sorted Source Nodes: [mul_2, truediv_2, erf, add_5, x_6], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
# Source node to ATen node mapping:
# add_5 => add_5
# erf => erf
# mul_2 => mul_3
# truediv_2 => div_3
# x_6 => mul_4
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_13, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div_3,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %add_5), kwargs = {})
triton_poi_fused_add_div_erf_mul_11 = async_compile.triton('triton_poi_fused_add_div_erf_mul_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/55/c55kf6uasfvlyi4fs3esg22nysynhdxbi47sxnyfzfqq64pxh7xj.py
# Topologically Sorted Source Nodes: [x_3, x_8], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_3 => add_2
# x_8 => add_6
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_10), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_15), kwargs = {})
triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_8, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [means, x_zeromean], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, variances, add, sqrt, x, mul, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_1.run(primals_2, buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0)
del primals_2
del primals_3
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((12, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_4, primals_5, primals_6, buf3, 12, grid=grid(12), stream=stream0)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(buf3, (4, ), (1, ), 4), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(buf3, (4, ), (1, ), 8), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del buf3
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf5, primals_8, buf6, 80, grid=grid(80), stream=stream0)
del primals_8
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 64), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_4.run(buf7, primals_4, primals_5, primals_6, 64, grid=grid(64), stream=stream0)
del primals_4
del primals_5
del primals_6
buf8 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf4, primals_7, buf8, 80, grid=grid(80), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((16, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 1, 5), (1, 0, 16), 0), out=buf9)
buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 64), 0); del buf4 # reuse
buf11 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 64), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf9, buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf12, buf10, buf11, 320, grid=grid(320), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf12, reinterpret_tensor(buf6, (16, 5, 1), (1, 16, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf10, (4, 16, 1), (16, 1, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf13, buf14, 4, 16, grid=grid(4, 16), stream=stream0)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_10
buf16 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean]
triton_poi_fused_mean_8.run(buf12, buf16, 80, grid=grid(80), stream=stream0)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_3, means_1, x_zeromean_1, pow_2, variances_1], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
triton_poi_fused_add_mean_pow_sub_9.run(primals_1, buf15, buf17, buf18, 16, grid=grid(16), stream=stream0)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, means_1, x_zeromean_1, add_3, sqrt_1, x_4, mul_1, x_5], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_10.run(primals_14, primals_1, buf15, buf17, buf18, primals_15, buf19, 64, grid=grid(64), stream=stream0)
del buf17
del buf18
del primals_15
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf20)
del primals_17
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, truediv_2, erf, add_5, x_6], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
triton_poi_fused_add_div_erf_mul_11.run(buf20, buf21, 64, grid=grid(64), stream=stream0)
buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [x_3, x_8], Original ATen: [aten.add]
triton_poi_fused_add_12.run(buf23, primals_1, buf15, primals_19, 64, grid=grid(64), stream=stream0)
del primals_19
return (buf23, buf16, primals_1, primals_14, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), primals_18, primals_16, primals_9, reinterpret_tensor(buf6, (16, 1, 5), (1, 1, 16), 0), reinterpret_tensor(buf7, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf8, (16, 5, 1), (1, 16, 1), 0), primals_13, primals_12, primals_11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import uuid
from torch import Tensor
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from typing import Optional
from typing import Dict
from torch.nn import Parameter
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
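# Illustrative sketch (not part of the original source): the tanh-based variant
# quoted in the docstring above, for comparison against the erf form. The
# helper name is made up for this example.
def _gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))
# e.g. torch.allclose(gelu(t), _gelu_tanh_approx(t), atol=1e-2) holds for
# t = torch.linspace(-3.0, 3.0, 101).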
def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.
__bases__ if b != FairseqIncrementalState)
return cls
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int
) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = self.weight * x + self.bias
return x
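# Minimal usage sketch (illustrative, not from the original source; the helper
# name and shapes are arbitrary): with freshly initialized affine parameters the
# TF-style normalization gives roughly zero mean and unit variance per position.
def _esm1_layernorm_demo():
    ln = ESM1LayerNorm(16)
    y = ln(torch.randn(2, 5, 16))
    return y.mean(-1), y.var(-1, unbiased=False)  # ~0 and ~1 respectively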
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: 'str') ->str:
return '{}.{}'.format(self._incremental_state_id, key)
def get_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str'
) ->Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str',
value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str,
Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
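def _incremental_state_sketch():
    # Illustrative sketch, not from the original source: the helpers above key
    # each module's cache by a per-instance UUID, so several attention modules
    # can share one `incremental_state` dict without clashing. Relies on the
    # MultiheadAttention class defined below.
    state = {}
    attn_a = MultiheadAttention(4, 1)
    attn_b = MultiheadAttention(4, 1)
    attn_a.set_incremental_state(state, 'attn_state', {'prev_key': torch.zeros(1, 1, 1, 4)})
    assert attn_a.get_incremental_state(state, 'attn_state') is not None
    assert attn_b.get_incremental_state(state, 'attn_state') is None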
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=
0.0, bias=True, add_bias_kv=False, add_zero_attn=False,
self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and value to be of the same size'
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.enable_torch_version = False
if hasattr(F, 'multi_head_attention_forward'):
self.enable_torch_version = True
else:
self.enable_torch_version = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key: 'Optional[Tensor]', value:
'Optional[Tensor]', key_padding_mask: 'Optional[Tensor]'=None,
incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]'=None,
need_weights: 'bool'=True, static_kv: 'bool'=False, attn_mask:
'Optional[Tensor]'=None, before_softmax: 'bool'=False,
need_head_weights: 'bool'=False) ->Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if (self.enable_torch_version and not self.onnx_trace and
incremental_state is None and not static_kv and not torch.jit.
is_scripting() and not need_head_weights):
assert key is not None and value is not None
return F.multi_head_attention_forward(query, key, value, self.
embed_dim, self.num_heads, torch.empty([0]), torch.cat((
self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k, self.bias_v, self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias, self.training,
key_padding_mask, need_weights, attn_mask,
use_separate_proj_weight=True, q_proj_weight=self.q_proj.
weight, k_proj_weight=self.k_proj.weight, v_proj_weight=
self.v_proj.weight)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and 'prev_key' in saved_state:
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
_prev_key = saved_state['prev_key']
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.
head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if 'prev_value' in saved_state:
_prev_value = saved_state['prev_value']
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1,
self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: 'Optional[Tensor]' = None
if 'prev_key_padding_mask' in saved_state:
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert k is not None and v is not None
key_padding_mask = (MultiheadAttention.
_append_prev_key_padding_mask(key_padding_mask=
key_padding_mask, prev_key_padding_mask=
prev_key_padding_mask, batch_size=bsz, src_len=k.size(1),
static_kv=static_kv))
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state,
saved_state)
assert k is not None
src_len = k.size(1)
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights,
tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.
unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace
=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=
self.dropout, training=self.training)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
attn_weights: 'Optional[Tensor]' = None
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads,
tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask: 'Optional[Tensor]',
prev_key_padding_mask: 'Optional[Tensor]', batch_size: 'int',
src_len: 'int', static_kv: 'bool') ->Optional[Tensor]:
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
key_padding_mask.float()], dim=1)
elif prev_key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len -
prev_key_padding_mask.size(1)), device=
prev_key_padding_mask.device)
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
filler.float()], dim=1)
elif key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len - key_padding_mask.
size(1)), device=key_padding_mask.device)
new_key_padding_mask = torch.cat([filler.float(),
key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', new_order: 'Tensor'):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state,
input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]') ->Dict[str,
Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, 'attn_state')
if result is not None:
return result
else:
empty_result: 'Dict[str, Optional[Tensor]]' = {}
return empty_result
def _set_input_buffer(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', buffer:
'Dict[str, Optional[Tensor]]'):
return self.set_incremental_state(incremental_state, 'attn_state',
buffer)
def apply_sparse_mask(attn_weights, tgt_len: 'int', src_len: 'int', bsz:
'int'):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + 'in_proj_weight'):
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim]
items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim:
2 * dim]
items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2 * dim:
]
keys_to_remove.append(k)
k_bias = prefix + 'in_proj_bias'
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][:
dim]
items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][
dim:2 * dim]
items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][
2 * dim:]
keys_to_remove.append(prefix + 'in_proj_bias')
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
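def _split_in_proj_sketch():
    # Illustrative sketch, not part of the original source: shows the effect of
    # `upgrade_state_dict_named` on an old-style checkpoint that stored a fused
    # `in_proj_weight`/`in_proj_bias`. The key prefix 'attn' is arbitrary.
    embed_dim = 4
    state = {
        'attn.in_proj_weight': torch.randn(3 * embed_dim, embed_dim),
        'attn.in_proj_bias': torch.randn(3 * embed_dim),
    }
    mha = MultiheadAttention(embed_dim, num_heads=1)
    mha.upgrade_state_dict_named(state, 'attn')
    assert 'attn.in_proj_weight' not in state
    assert state['attn.q_proj.weight'].shape == (embed_dim, embed_dim)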
class TransformerLayer(nn.Module):
"""Transformer layer block."""
def __init__(self, embed_dim, ffn_embed_dim, attention_heads,
add_bias_kv=True, use_esm1b_layer_norm=False):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = (ESM1bLayerNorm if use_esm1b_layer_norm else
ESM1LayerNorm)
self.self_attn = MultiheadAttention(self.embed_dim, self.
attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(self, x, self_attn_mask=None, self_attn_padding_mask=None,
need_head_weights=False):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(query=x, key=x, value=x, key_padding_mask=
self_attn_padding_mask, need_weights=True, need_head_weights=
need_head_weights, attn_mask=self_attn_mask)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = gelu(self.fc1(x))
x = self.fc2(x)
x = residual + x
return x, attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'ffn_embed_dim': 4, 'attention_heads': 4}]
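def _transformer_layer_sketch():
    # Illustrative sketch, not part of the original source: wires the helpers
    # above together the way a test harness might, using get_init_inputs() and
    # get_inputs() rather than any values of our own.
    init_args, init_kwargs = get_init_inputs()
    layer = TransformerLayer(*init_args, **init_kwargs)
    x, = get_inputs()  # shape (tgt_len, bsz, embed_dim) = (4, 4, 4)
    out, attn = layer(x)
    assert out.shape == x.shape  # the block is residual, so the shape is preserved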
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import uuid
from torch import Tensor
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from typing import Optional
from typing import Dict
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
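# The kernel above fuses the zero-mean step of an ESM1LayerNorm-style
# normalization: for each contiguous group of 4 features it computes the mean
# and stores x - mean.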
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
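# Completes that normalization: divides the zero-mean values by
# sqrt(variance + 1e-12), using the biased variance over the 4 features, and
# applies a per-feature scale (in_ptr0) and shift (in_ptr2).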
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x0, tmp16, xmask)
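# Concatenates three 4-element bias vectors into a single 12-element buffer;
# `call` below reads 4-element slices of it (offsets 4 and 8) for the addmm
# projection calls.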
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = x2 % 4
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + x0 % 4, tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr1 + (-4 + x0 % 4), tmp10 & xmask, eviction_policy
='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1], 12, tl.int64)
tmp15 = tl.load(in_ptr2 + (-8 + x0 % 4), tmp12 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(in_out_ptr0 + x2, tmp20, xmask)
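# Adds a per-feature bias to the query-projection buffer in place and applies
# the attention scale (head_dim ** -0.5), which folds to the constant 1.0 here
# because head_dim == embed_dim // num_heads == 1 in this configuration.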
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp1 - tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp8
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x2, tmp5, xmask)
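# The two kernels above form a numerically stable softmax over the 5 keys:
# _softmax_5 records each row's max and the sum of exp(x - max), and
# _softmax_6 then normalizes every logit in place using those per-row statistics.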
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mean_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 80 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (20 + x0 + 80 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (40 + x0 + 80 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (60 + x0 + 80 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
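# Averages the (bsz*heads, tgt_len, src_len) = (16, 4, 5) attention
# probabilities over the 4 heads of each batch element, yielding the (4, 4, 5)
# head-averaged weights that the wrapper returns as its second output.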
@triton.jit
def triton_poi_fused_add_mean_pow_sub_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
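# Computes the per-row mean and biased variance of the element-wise sum of its
# two inputs (the residual plus the attention output), i.e. the statistics
# needed by the second LayerNorm.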
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
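# Finishes that LayerNorm: (x - mean) / sqrt(variance + 1e-12), followed by a
# per-feature scale and shift; the result feeds the feed-forward block.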
@triton.jit
def triton_poi_fused_add_div_erf_mul_11(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
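# Element-wise GELU in the erf form used by `gelu()` above:
# 0.5 * x * (1 + erf(x / sqrt(2))), where 0.7071067811865475 is 1 / sqrt(2).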
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
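# Fuses the final residual connection in place:
# out = input + attention_output + (feed-forward matmul result + per-feature bias).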
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_8, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(64)](primals_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(64)](primals_2,
buf0, primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
del primals_3
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((12,), (1,), torch.float32)
triton_poi_fused_cat_2[grid(12)](primals_4, primals_5, primals_6,
buf3, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf3, (4,), (1,), 4),
reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf3, (4,), (1,), 8),
reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf5)
del buf3
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(80)](buf5, primals_8, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 64), 0)
del buf2
triton_poi_fused_mul_4[grid(64)](buf7, primals_4, primals_5,
primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
del primals_5
del primals_6
buf8 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(80)](buf4, primals_7, buf8, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((16, 4, 5), (20, 5, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 1, 5), (1, 0,
16), 0), out=buf9)
buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 64), 0)
del buf4
buf11 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 64), 0)
del buf5
triton_poi_fused__softmax_5[grid(64)](buf9, buf10, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused__softmax_6[grid(320)](buf12, buf10, buf11, 320,
XBLOCK=256, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 1), 0)
del buf11
extern_kernels.bmm(buf12, reinterpret_tensor(buf6, (16, 5, 1), (1,
16, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf10, (4, 16, 1), (16, 1, 1), 0)
del buf10
triton_poi_fused_clone_7[grid(4, 16)](buf13, buf14, 4, 16, XBLOCK=
16, YBLOCK=4, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_10
buf16 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_mean_8[grid(80)](buf12, buf16, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_9[grid(16)](primals_1, buf15,
buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_10[grid(64)](primals_14,
primals_1, buf15, buf17, buf18, primals_15, buf19, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf17
del buf18
del primals_15
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_17, reinterpret_tensor(buf19, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf20)
del primals_17
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_11[grid(64)](buf20, buf21, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0)
del buf22
triton_poi_fused_add_12[grid(64)](buf23, primals_1, buf15,
primals_19, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_19
return buf23, buf16, primals_1, primals_14, reinterpret_tensor(buf1, (
16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf14, (16, 4), (4, 1), 0
), buf15, reinterpret_tensor(buf19, (16, 4), (4, 1), 0
), buf20, reinterpret_tensor(buf21, (16, 4), (4, 1), 0
), primals_18, primals_16, primals_9, reinterpret_tensor(buf6, (16,
1, 5), (1, 1, 16), 0), reinterpret_tensor(buf7, (16, 1, 4), (1, 1,
16), 0), reinterpret_tensor(buf8, (16, 5, 1), (1, 16, 1), 0
), primals_13, primals_12, primals_11
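# The first two tensors returned by `call` (buf23 and buf16) are what the
# wrapper module below exposes as the layer output and the head-averaged
# attention weights; the remaining tensors are intermediates that inductor
# keeps alive, typically for the backward pass.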
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.
__bases__ if b != FairseqIncrementalState)
return cls
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int
) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = self.weight * x + self.bias
return x
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: 'str') ->str:
return '{}.{}'.format(self._incremental_state_id, key)
def get_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str'
) ->Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str',
value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str,
Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=
0.0, bias=True, add_bias_kv=False, add_zero_attn=False,
self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and value to be of the same size'
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.enable_torch_version = False
if hasattr(F, 'multi_head_attention_forward'):
self.enable_torch_version = True
else:
self.enable_torch_version = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key: 'Optional[Tensor]', value:
'Optional[Tensor]', key_padding_mask: 'Optional[Tensor]'=None,
incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]'=None,
need_weights: 'bool'=True, static_kv: 'bool'=False, attn_mask:
'Optional[Tensor]'=None, before_softmax: 'bool'=False,
need_head_weights: 'bool'=False) ->Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if (self.enable_torch_version and not self.onnx_trace and
incremental_state is None and not static_kv and not torch.jit.
is_scripting() and not need_head_weights):
assert key is not None and value is not None
return F.multi_head_attention_forward(query, key, value, self.
embed_dim, self.num_heads, torch.empty([0]), torch.cat((
self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k, self.bias_v, self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias, self.training,
key_padding_mask, need_weights, attn_mask,
use_separate_proj_weight=True, q_proj_weight=self.q_proj.
weight, k_proj_weight=self.k_proj.weight, v_proj_weight=
self.v_proj.weight)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and 'prev_key' in saved_state:
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
_prev_key = saved_state['prev_key']
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.
head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if 'prev_value' in saved_state:
_prev_value = saved_state['prev_value']
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1,
self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: 'Optional[Tensor]' = None
if 'prev_key_padding_mask' in saved_state:
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert k is not None and v is not None
key_padding_mask = (MultiheadAttention.
_append_prev_key_padding_mask(key_padding_mask=
key_padding_mask, prev_key_padding_mask=
prev_key_padding_mask, batch_size=bsz, src_len=k.size(1),
static_kv=static_kv))
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state,
saved_state)
assert k is not None
src_len = k.size(1)
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights,
tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.
unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace
=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=
self.dropout, training=self.training)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
attn_weights: 'Optional[Tensor]' = None
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads,
tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask: 'Optional[Tensor]',
prev_key_padding_mask: 'Optional[Tensor]', batch_size: 'int',
src_len: 'int', static_kv: 'bool') ->Optional[Tensor]:
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
key_padding_mask.float()], dim=1)
elif prev_key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len -
prev_key_padding_mask.size(1)), device=
prev_key_padding_mask.device)
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
filler.float()], dim=1)
elif key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len - key_padding_mask.
size(1)), device=key_padding_mask.device)
new_key_padding_mask = torch.cat([filler.float(),
key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', new_order: 'Tensor'):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state,
input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]') ->Dict[str,
Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, 'attn_state')
if result is not None:
return result
else:
empty_result: 'Dict[str, Optional[Tensor]]' = {}
return empty_result
def _set_input_buffer(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', buffer:
'Dict[str, Optional[Tensor]]'):
return self.set_incremental_state(incremental_state, 'attn_state',
buffer)
def apply_sparse_mask(attn_weights, tgt_len: 'int', src_len: 'int', bsz:
'int'):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + 'in_proj_weight'):
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim]
items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim:
2 * dim]
items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2 * dim:
]
keys_to_remove.append(k)
k_bias = prefix + 'in_proj_bias'
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][:
dim]
items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][
dim:2 * dim]
items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][
2 * dim:]
keys_to_remove.append(prefix + 'in_proj_bias')
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
class TransformerLayerNew(nn.Module):
"""Transformer layer block."""
def __init__(self, embed_dim, ffn_embed_dim, attention_heads,
add_bias_kv=True, use_esm1b_layer_norm=False):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = (ESM1bLayerNorm if use_esm1b_layer_norm else
ESM1LayerNorm)
self.self_attn = MultiheadAttention(self.embed_dim, self.
attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(self, input_0):
primals_7 = self.self_attn.bias_k
primals_8 = self.self_attn.bias_v
primals_9 = self.self_attn.k_proj.weight
primals_2 = self.self_attn.k_proj.bias
primals_11 = self.self_attn.v_proj.weight
primals_3 = self.self_attn.v_proj.bias
primals_12 = self.self_attn.q_proj.weight
primals_4 = self.self_attn.q_proj.bias
primals_13 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_6 = self.self_attn_layer_norm.weight
primals_10 = self.self_attn_layer_norm.bias
primals_16 = self.fc1.weight
primals_14 = self.fc1.bias
primals_18 = self.fc2.weight
primals_15 = self.fc2.bias
primals_17 = self.final_layer_norm.weight
primals_19 = self.final_layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0], output[1]
| William-Zhanng/Protein_affinity | TransformerLayer | false | 6018 | [
"MIT"
] | 1 | 8abd12073b182274bf464ff23fd3be406c4e39ac | https://github.com/William-Zhanng/Protein_affinity/tree/8abd12073b182274bf464ff23fd3be406c4e39ac | import math
import torch
import uuid
from torch import Tensor
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from typing import Optional
from typing import Dict
from torch.nn import Parameter
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.
__bases__ if b != FairseqIncrementalState)
return cls
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int
) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = self.weight * x + self.bias
return x
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: 'str') ->str:
return '{}.{}'.format(self._incremental_state_id, key)
def get_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str'
) ->Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str',
value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str,
Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=
0.0, bias=True, add_bias_kv=False, add_zero_attn=False,
self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embe
# ... truncated (>4000 chars) for memory efficiency |
RecCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
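# The kernel above subtracts the per-sample maximum over dim 1 (the 4 classes),
# the numerically stable first step of the log-softmax used by the
# cross-entropy term of this loss.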
# kernel path: runs/run_shard_4/inductor_cache/lg/clg7crwnywzgnu4uplop4qfbz4zk7bthyikoulvxluscxbgvxr4g.py
# Topologically Sorted Source Nodes: [cross_entropy, mse_loss, mul, add], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mse_loss, aten.add]
# Source node to ATen node mapping:
# add => add
# cross_entropy => div, exp, log, mul, neg, sub_1, sum_1, sum_2
# mse_loss => mean, pow_1, sub_2
# mul => mul_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_1), kwargs = {})
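# Added note (hedged): from the graph fragment above, this persistent reduction finishes
# log_softmax (log-sum-exp over dim=1), multiplies the log-probabilities by in_ptr1 (the
# soft-target operand of the cross-entropy), and fuses in the MSE term on in_ptr2/in_ptr3
# plus the rec_ratio scaling. Rough eager-mode sketch of the same arithmetic:
#     log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
#     loss = -(log_probs * soft_targets).sum() / 64 + 4.0 * ((a - b) ** 2).mean()
# The kernel constants match: 0.015625 == 1/64 (the batch*spatial count the cross-entropy
# averages over), 256.0 is the element count behind the MSE mean, and 4.0 is the
# rec_ratio from get_init_inputs().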
triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp19 = tl.load(in_ptr2 + (r3), None)
tmp20 = tl.load(in_ptr3 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = -tmp18
tmp27 = 0.015625
tmp28 = tmp26 * tmp27
tmp29 = 256.0
tmp30 = tmp25 / tmp29
tmp31 = 4.0
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp33, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_entropy, mse_loss, mul, add], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mse_loss, aten.add]
triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1.run(buf3, buf0, arg1_1, arg3_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del arg2_1
del arg3_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class RecCrossEntropyLoss(nn.Module):
def __init__(self, rec_ratio):
super(RecCrossEntropyLoss, self).__init__()
self.rec_ratio = rec_ratio
def forward(self, rec, inputs, logits, targets):
rec_loss = nn.MSELoss()
cls_loss = nn.CrossEntropyLoss()
return cls_loss(logits, targets) + self.rec_ratio * rec_loss(rec,
inputs)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'rec_ratio': 4}]
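# Hedged usage sketch (illustrative, not part of the original source):
#     loss_fn = RecCrossEntropyLoss(rec_ratio=4)
#     rec, inputs, logits, targets = get_inputs()
#     loss = loss_fn(rec, inputs, logits, targets)   # scalar: CE(logits, targets) + 4 * MSE(rec, inputs)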
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
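# Pointwise kernel: for each of the 256 elements, subtracts the maximum over the
# channel dimension (dim=1, four values at strides of 16) from the element itself —
# the numerically stable first step of log_softmax on the (4, 4, 4, 4) input.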
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
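# Persistent reduction over all 256 elements: completes log_softmax via log-sum-exp,
# accumulates the soft-target cross-entropy sum and the squared-error sum, then writes
# the scalar loss as -ce_sum / 64 + (sq_err_sum / 256) * 4.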
@triton.jit
def triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp19 = tl.load(in_ptr2 + r3, None)
tmp20 = tl.load(in_ptr3 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = -tmp18
tmp27 = 0.015625
tmp28 = tmp26 * tmp27
tmp29 = 256.0
tmp30 = tmp25 / tmp29
tmp31 = 4.0
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
triton_per_fused__log_softmax_add_div_mse_loss_mul_neg_sum_1[grid(1)](
buf3, buf0, arg1_1, arg3_1, arg2_1, 1, 256, num_warps=2,
num_stages=1)
del arg1_1
del arg2_1
del arg3_1
del buf0
return buf3,
class RecCrossEntropyLossNew(nn.Module):
def __init__(self, rec_ratio):
super(RecCrossEntropyLossNew, self).__init__()
self.rec_ratio = rec_ratio
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
 | YuhengZhi/Attention-Net-with-MNIST- | RecCrossEntropyLoss | false | 6,019 | ["MIT"] | 1 | aa6805e4df777dee1056d5f4f4f9a9b1e4a5e4ff | https://github.com/YuhengZhi/Attention-Net-with-MNIST-/tree/aa6805e4df777dee1056d5f4f4f9a9b1e4a5e4ff | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, rec_ratio):
super().__init__()
self.rec_ratio = rec_ratio
def forward(self, rec, inputs, logits, targets):
rec_loss = nn.MSELoss()
cls_loss = nn.CrossEntropyLoss()
return cls_loss(logits, targets) + self.rec_ratio * rec_loss(rec,
inputs)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BasicDeconv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fc/cfc2ly6hdnwcliv24f2k6prmw2piqgg6spz6xhzakrvpl323svai.py
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
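# Added note (hedged): the transposed convolution itself runs through
# extern_kernels.convolution in call(); the Triton kernel below only adds the
# per-channel bias, applies ReLU in place, and stores the (result <= 0) mask that
# the ReLU backward (threshold_backward) reuses. Rough eager-mode sketch:
#     y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
#     mask = y <= 0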
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 784, grid=grid(784), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicDeconv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super(BasicDeconv, self).__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.tconv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
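# Hedged usage sketch (illustrative, not part of the original source):
#     layer = BasicDeconv(in_channels=4, out_channels=4, kernel_size=4)
#     y = layer(torch.rand(4, 4, 4, 4))   # ConvTranspose2d + ReLU -> shape (4, 4, 7, 7)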
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
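# Fused pointwise kernel over the (4, 4, 7, 7) deconvolution output: adds the
# per-channel bias, applies ReLU in place, and stores the (result <= 0) boolean
# mask consumed by the ReLU backward pass.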
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(784)](buf1,
primals_2, buf2, 784, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class BasicDeconvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super(BasicDeconvNew, self).__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, input_0):
primals_1 = self.tconv.weight
primals_2 = self.tconv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
 | Yuuchuin/C3_V2 | BasicDeconv | false | 6,020 | ["MIT"] | 1 | 92a5edbc2c2b3452c5f57e74f928591192293e81 | https://github.com/Yuuchuin/C3_V2/tree/92a5edbc2c2b3452c5f57e74f928591192293e81 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super().__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.tconv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|