Column summary (value ranges as reported by the dataset viewer):

| column | type | range |
|---|---|---|
| entry_point | string | length 1 to 65 |
| original_triton_code | string | length 4.5k to 619k |
| python_code | string | length 208 to 60.9k |
| triton_code | string | length 1.15k to 275k |
| repo_name | string | length 7 to 115 |
| module_name | string | length 1 to 65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0 to 18.5k |
| licenses | sequence | length 1 to 6 |
| stars | int64 | 0 to 19.8k |
| sha | string | length 40 |
| repo_link | string | length 72 to 180 |
| pytorch_code | string | length 200 to 4.05k |

| entry_point | original_triton_code | python_code | triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link | pytorch_code |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
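Each record below pairs a PyTorch module (`pytorch_code`) with its TorchInductor-generated Triton lowering (`original_triton_code`, plus a trimmed `triton_code` variant). A minimal loading sketch, assuming the dataset is published on the Hugging Face Hub; the repository id `org/dataset-name` is a placeholder, not the real id:

```python
# Minimal sketch: load the dataset and inspect one record.
# "org/dataset-name" is a placeholder repository id (assumption).
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["pytorch_code"][:200])  # source of the original PyTorch module
```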
Biaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bc/cbcett6ey62xkijoadrmiqwnmvdqa242vrdqwxiw4pvecwqjoged.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True, diagonal=False
):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.diagonal = diagonal
if self.diagonal:
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x))
else:
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x,
n_in + bias_y))
self.reset_parameters()
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
if self.diagonal:
s = torch.einsum('bxi,byi,oi->boxy', x, y, self.weight)
else:
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True, diagonal=False
):
super(BiaffineNew, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.diagonal = diagonal
if self.diagonal:
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x))
else:
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x,
n_in + bias_y))
self.reset_parameters()
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| ciaochiaociao/CLNER | Biaffine | false | 3,373 | ["MIT"] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class Model(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True, diagonal=False
):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.diagonal = diagonal
if self.diagonal:
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x))
else:
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x,
n_in + bias_y))
self.reset_parameters()
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
if self.diagonal:
s = torch.einsum('bxi,byi,oi->boxy', x, y, self.weight)
else:
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
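A note on the Biaffine record above: the non-diagonal score `torch.einsum('bxi,oij,byj->boxy', x, w, y)` factors into two batched matrix products, which is how the generated `call` lowers it (one `bmm` against the weight, then one against `y`). A small sketch of that identity, using shapes that match the record's `get_inputs` after the bias column is appended:

```python
# Sketch: the biaffine einsum equals two broadcasted matmuls.
import torch

b, n, d, o = 4, 4, 4, 1
x = torch.rand(b, n, d + 1)                         # inputs with bias column
y = torch.rand(b, n, d + 1)
w = torch.rand(o, d + 1, d + 1)

s_einsum = torch.einsum('bxi,oij,byj->boxy', x, w, y)
tmp = x.unsqueeze(1) @ w.unsqueeze(0)               # (b, o, n, d+1)
s_matmul = tmp @ y.unsqueeze(1).transpose(-1, -2)   # (b, o, n, n)
torch.testing.assert_close(s_einsum, s_matmul)
```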
cnn_layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xr/cxrzuhg4v2k43m6sohikpdt2rwzcj5ayuvu3xeyydisg34liifbv.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 1), (4, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 4, grid=grid(4), stream=stream0)
del primals_2
return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class cnn_layer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, bias=True):
super(cnn_layer, self).__init__()
self.conv = torch.nn.Conv1d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=bias)
self.relu = torch.nn.ReLU()
def forward(self, input):
return self.relu(self.conv(input))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 1), (4, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4)](buf1, primals_2,
buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4,
1), 0), buf2
class cnn_layerNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, bias=True):
super(cnn_layerNew, self).__init__()
self.conv = torch.nn.Conv1d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=bias)
self.relu = torch.nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ciaochiaociao/CLNER | cnn_layer | false | 3,374 | ["MIT"] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, bias=True):
super().__init__()
self.conv = torch.nn.Conv1d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=bias)
self.relu = torch.nn.ReLU()
def forward(self, input):
return self.relu(self.conv(input))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
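For the cnn_layer record above, the Conv1d output shape in the generated code (buf0 of shape (1, 4, 1)) follows from the standard length formula `L_out = (L_in + 2*padding - dilation*(kernel_size - 1) - 1) // stride + 1`, here `(4 + 0 - 3 - 1) // 1 + 1 = 1`. A quick sketch with the record's own init and input shapes (recent PyTorch accepts the unbatched 2-D input, which Inductor reinterprets as a batch of one):

```python
# Sketch: output length of the record's Conv1d, using its get_init_inputs()
# (in_channels=4, out_channels=4, kernel_size=4) and get_inputs() shape (4, 4).
import torch

conv = torch.nn.Conv1d(4, 4, kernel_size=4)
x = torch.rand(4, 4)         # unbatched (C_in, L); Inductor views it as (1, 4, 4)
out = torch.relu(conv(x))
print(out.shape)             # torch.Size([4, 1]); L_out = (4 - 4) // 1 + 1 = 1
```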
ffnn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.add]
# Source node to ATen node mapping:
# outputs => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
def get_shape(t):
return list(t.shape)
class ffnn(nn.Module):
def __init__(self, emb_size, num_layers, hidden_size, output_size,
dropout, output_weights_initializer=None):
super(ffnn, self).__init__()
self.dropout = torch.nn.Dropout(p=dropout)
self.weights = nn.Parameter(torch.Tensor(emb_size, output_size))
self.bias = nn.Parameter(torch.Tensor(output_size))
self.activation = torch.nn.ReLU()
self.num_layers = num_layers
self.emb_size = emb_size
self.hidden_size = hidden_size
self.output_size = output_size
self.initializer = output_weights_initializer
self.initialize()
def initialize(self):
if self.initializer is None:
torch.nn.init.xavier_uniform_(self.weights, gain=1)
else:
self.initializer(self.weights, gain=1)
nn.init.zeros_(self.bias)
def forward(self, inputs):
current_inputs = inputs
if len(get_shape(inputs)) == 3:
batch_size, seqlen, emb_size = get_shape(inputs)
current_inputs = inputs.reshape(batch_size * seqlen, emb_size)
emb_size = get_shape(current_inputs)[-1]
assert emb_size == self.emb_size, 'last dim of input does not match this layer'
outputs = current_inputs.matmul(self.weights) + self.bias
if len(get_shape(inputs)) == 3:
outputs = outputs.reshape(batch_size, seqlen, self.output_size)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_size': 4, 'num_layers': 1, 'hidden_size': 4,
'output_size': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
def get_shape(t):
return list(t.shape)
class ffnnNew(nn.Module):
def __init__(self, emb_size, num_layers, hidden_size, output_size,
dropout, output_weights_initializer=None):
super(ffnnNew, self).__init__()
self.dropout = torch.nn.Dropout(p=dropout)
self.weights = nn.Parameter(torch.Tensor(emb_size, output_size))
self.bias = nn.Parameter(torch.Tensor(output_size))
self.activation = torch.nn.ReLU()
self.num_layers = num_layers
self.emb_size = emb_size
self.hidden_size = hidden_size
self.output_size = output_size
self.initializer = output_weights_initializer
self.initialize()
def initialize(self):
if self.initializer is None:
torch.nn.init.xavier_uniform_(self.weights, gain=1)
else:
self.initializer(self.weights, gain=1)
nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_2 = self.weights
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ciaochiaociao/CLNER | ffnn | false | 3,375 | ["MIT"] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
def get_shape(t):
return list(t.shape)
class Model(nn.Module):
def __init__(self, emb_size, num_layers, hidden_size, output_size,
dropout, output_weights_initializer=None):
super().__init__()
self.dropout = torch.nn.Dropout(p=dropout)
self.weights = nn.Parameter(torch.Tensor(emb_size, output_size))
self.bias = nn.Parameter(torch.Tensor(output_size))
self.activation = torch.nn.ReLU()
self.num_layers = num_layers
self.emb_size = emb_size
self.hidden_size = hidden_size
self.output_size = output_size
self.initializer = output_weights_initializer
self.initialize()
def initialize(self):
if self.initializer is None:
torch.nn.init.xavier_uniform_(self.weights, gain=1)
else:
self.initializer(self.weights, gain=1)
nn.init.zeros_(self.bias)
def forward(self, inputs):
current_inputs = inputs
if len(get_shape(inputs)) == 3:
batch_size, seqlen, emb_size = get_shape(inputs)
current_inputs = inputs.reshape(batch_size * seqlen, emb_size)
emb_size = get_shape(current_inputs)[-1]
assert emb_size == self.emb_size, 'last dim of input does not match this layer'
outputs = current_inputs.matmul(self.weights) + self.bias
if len(get_shape(inputs)) == 3:
outputs = outputs.reshape(batch_size, seqlen, self.output_size)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_size': 4, 'num_layers': 1, 'hidden_size': 4,
'output_size': 4, 'dropout': 0.5}]
|
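Despite its name, the ffnn record above applies a single affine map, `outputs = inputs @ W + b`, which the generated code lowers to one `mm` plus a fused bias add. A small sketch showing the same result via `F.linear`, whose weight layout is the transpose of this module's:

```python
# Sketch: ffnn's forward is x @ W + b, i.e. F.linear with the weight transposed.
import torch
import torch.nn.functional as F

W = torch.rand(4, 4)             # (emb_size, output_size), as in the module
b = torch.rand(4)
x = torch.rand(4, 4, 4, 4)       # matches the record's get_inputs()

out_manual = x.matmul(W) + b
out_linear = F.linear(x, W.t(), b)   # F.linear expects an (out, in) weight
torch.testing.assert_close(out_manual, out_linear)
```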
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/di/cdijcpkyx56houfal4ki6il7jkaksycupkszs32h2msepvzzrlog.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 86528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 676) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pl/cpllpfgu6yj3hoeiy2fgu22gwwiekps4ak5ojo4ju7e2tpij6eek.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 576) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tr/ctrkzmyh4pj3ravgoqrmvb4w47tnmqrj33664h4xyyqctdwzby5v.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2q/c2qz2ws53yjsy6jju72g7tnxehfjxae2rfg45uiafec7haukf7jm.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_9 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kt/cktoaw2tj346cwycsyhchtdmh6r7rvxeyqrjqite25fkxbpqqwh7.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_4 = async_compile.triton('triton_per_fused__log_softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 26, 26), (21632, 676, 26, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 86528, grid=grid(86528), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 24, 24), (36864, 576, 24, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 147456, grid=grid(147456), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.int8)
buf5 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 36864, grid=grid(36864), stream=stream0)
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
triton_poi_fused_relu_3.run(buf7, primals_7, 512, grid=grid(512), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf11 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_4.run(buf8, buf11, 4, 10, grid=grid(4), stream=stream0)
del buf8
return (buf11, primals_2, primals_4, primals_1, buf1, buf3, buf4, reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), buf7, buf11, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 9216), (9216, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = x.view((-1, 1, 28, 28))
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
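The fused kernel `triton_per_fused__log_softmax_4` in this record computes `log_softmax` row-wise using the max-subtraction trick for numerical stability. A sketch of the identity it implements:

```python
# Sketch: log_softmax(x) = (x - m) - log(sum(exp(x - m))) with m = max(x),
# the numerically stable form the fused kernel computes per row.
import torch

x = torch.rand(4, 10)
m = x.max(dim=1, keepdim=True).values
manual = (x - m) - (x - m).exp().sum(dim=1, keepdim=True).log()
torch.testing.assert_close(manual, torch.log_softmax(x, dim=1))
```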
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 86528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
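# Driver: convolutions and matmuls go through extern kernels (cuDNN/cuBLAS),
# while the Triton kernels above supply the fused pointwise epilogues. No
# dropout ops appear, so the graph was evidently traced with dropout inactive.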
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 26, 26), (21632, 676, 26, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(86528)](buf1, primals_3,
86528, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 24, 24), (36864, 576, 24, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(147456)](buf3, primals_5,
147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(36864)](buf3, buf4,
buf5, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0),
reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_3[grid(512)](buf7, primals_7, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf11 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_4[grid(4)](buf8, buf11, 4, 10, XBLOCK
=1, num_warps=2, num_stages=1)
del buf8
return (buf11, primals_2, primals_4, primals_1, buf1, buf3, buf4,
reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), buf7, buf11,
primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| cyliu0204/petastorm | Net | false | 3,376 | [
"Apache-2.0"
] | 0 | 589d40ce372c311382f37e4271a5169dafba5db1 | https://github.com/cyliu0204/petastorm/tree/589d40ce372c311382f37e4271a5169dafba5db1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = x.view((-1, 1, 28, 28))
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return []
|
BertSelfOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/uv/cuv5ywiflh7n5wdgqtkufif4yho6cbzwcjdxkdjubib3zte4a6ke.py
# Topologically Sorted Source Nodes: [hidden_states_2, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# hidden_states_2 => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/my/cmysbppbwfknwvg2gi2jiqqs7l3hosm7k7nbz7vf3rtnd4p6gznh.py
# Topologically Sorted Source Nodes: [hidden_states_2, u, sub], Original ATen: [aten.add, aten.mean, aten.sub]
# Source node to ATen node mapping:
# hidden_states_2 => add
# sub => sub
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_mean_sub_1 = async_compile.triton('triton_poi_fused_add_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4p/c4pnuv3rymhg72qutbvx7mkzv6t7edcefa73bt3nl66b4qtouu4a.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_3], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_3 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_5), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [hidden_states_2, u, sub], Original ATen: [aten.add, aten.mean, aten.sub]
triton_poi_fused_add_mean_sub_1.run(buf2, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_3], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_4, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
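# Note: s is the biased variance over the last dimension, and the mock config
# below sets layer_norm_eps=1, so the denominator is sqrt(var + 1) rather
# than the usual sqrt(var + 1e-12).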
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias
=False)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1,
hidden_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
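# Fused residual-add + mean: accumulates dense(hidden_states) + input_tensor
# over the last (size-4) dimension and stores the per-position mean u.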
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
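# Recomputes the residual add in place and subtracts the precomputed mean:
# the centering step (x - u) of BertLayerNorm.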
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
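# Finishes the TF-style LayerNorm: biased variance over the last dim, plus
# the eps (1.0 here, from layer_norm_eps=1 in the mock config), divide by the
# sqrt, then scale by the weight (in_ptr0) and shift by the bias (in_ptr2).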
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
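# Driver: one cuBLAS mm for the bias-free dense projection, then the three
# Triton kernels above implement residual + LayerNorm. Dropout is absent from
# the traced graph.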
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_3, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mean_sub_1[grid(256)](buf2, primals_3, buf1,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_4,
buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf2
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutputNew(nn.Module):
def __init__(self, config):
super(BertSelfOutputNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias
=False)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_4 = self.LayerNorm.weight
primals_5 = self.LayerNorm.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Splendon/examples | BertSelfOutput | false | 3,377 | [
"MIT"
] | 0 | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | https://github.com/Splendon/examples/tree/ed4a8a01857b6ddca49559141acf5d0986eb01e1 | from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias
=False)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1,
hidden_dropout_prob=0.5)}]
|
HexaLinearScore | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ar/carmia3wh3a4soirr2hwld2ewi75pvcghm4jka76mzx45462dleq.py
# Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# g1 => clone, view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [8, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 2)) + (16*(x1 // 2))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ht/chtw7yb2aeiwqf7e6wcciqo5cs7snfwq55c2zmy7756abjt5odwg.py
# Topologically Sorted Source Nodes: [g2], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# g2 => clone_1, view_2
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format})
# %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_1, [8, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_1 = async_compile.triton('triton_poi_fused__unsafe_view_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (4*(x1 % 2)) + (16*(x1 // 2))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6y/c6yrrdc2nauu5gzytu6guntvth4pajjmsnsfsjwimejql5e62rme.py
# Topologically Sorted Source Nodes: [g3], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# g3 => clone_2, view_4
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_2, [8, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_2 = async_compile.triton('triton_poi_fused__unsafe_view_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + (4*(x1 % 2)) + (16*(x1 // 2))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u5/cu54wdpiiawrisqv7ugmuf666653hkp2zredg43wlga6qn64lsbl.py
# Topologically Sorted Source Nodes: [mul, temp01], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# temp01 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_5), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3168
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oj/cojedjn2cyrz3u5rpahykrfq5sui4tyzd4aus2aip6rvwrpymfzj.py
# Topologically Sorted Source Nodes: [temp02], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# temp02 => mul_2, mul_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %permute_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %mul_2), kwargs = {})
triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex % 1584
x0 = xindex % 396
x3 = (xindex // 6336)
x2 = (xindex // 1584) % 4
x4 = (xindex // 1584)
tmp0 = tl.load(in_ptr0 + (x5), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (396*x3)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + (396*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x5 + (1600*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jy/cjyih4xrikzd7ipfrh25genasic2vujoqtxwqanglf4xqxvltqos.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
# Source node to ATen node mapping:
# score => bmm
# Graph fragment:
# %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_6, %view_7), kwargs = {})
# %permute_10 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_7, [0, 2, 1]), kwargs = {})
triton_poi_fused_bmm_transpose_5 = async_compile.triton('triton_poi_fused_bmm_transpose_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_transpose_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 25344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x1 = (xindex // 396)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (396*(x1 % 4)) + (1600*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
tl.store(out_ptr1 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4g/c4guh5fiotlbffthmqf7v7xrn4nvwh2cb5ppnveaklu6nk2lqe3f.py
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# score_1 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 19.8997487421324), kwargs = {})
triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_6(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.050251890762960605
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 396), (396, 1))
assert_size_stride(primals_3, (4, 396), (396, 1))
assert_size_stride(primals_4, (4, 396), (396, 1))
assert_size_stride(primals_5, (4, 20), (20, 1))
assert_size_stride(primals_6, (20, 396), (396, 1))
assert_size_stride(primals_7, (20, 396), (396, 1))
assert_size_stride(primals_8, (20, 396), (396, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0)
buf1 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g1], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g2], Original ATen: [aten.clone, aten._unsafe_view]
triton_poi_fused__unsafe_view_clone_1.run(primals_1, buf2, 32, grid=grid(32), stream=stream0)
buf3 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g2], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_3, out=buf3)
del primals_3
buf4 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g3], Original ATen: [aten.clone, aten._unsafe_view]
triton_poi_fused__unsafe_view_clone_2.run(primals_1, buf4, 32, grid=grid(32), stream=stream0)
del primals_1
buf5 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g3], Original ATen: [aten.mm]
extern_kernels.mm(buf4, primals_4, out=buf5)
del primals_4
buf6 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g4], Original ATen: [aten.mm]
extern_kernels.mm(primals_5, primals_6, out=buf6)
buf7 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g5], Original ATen: [aten.mm]
extern_kernels.mm(primals_5, primals_7, out=buf7)
buf8 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g6], Original ATen: [aten.mm]
extern_kernels.mm(primals_5, primals_8, out=buf8)
buf9 = empty_strided_cuda((4, 2, 396), (792, 396, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, temp01], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(buf1, buf3, buf5, buf9, 3168, grid=grid(3168), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 4, 396), (6400, 1600, 396, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp02], Original ATen: [aten.mul]
triton_poi_fused_mul_4.run(buf8, buf6, buf7, buf10, 25344, grid=grid(25344), stream=stream0)
buf11 = empty_strided_cuda((1, 396, 64), (25344, 1, 396), torch.float32)
buf14 = empty_strided_cuda((1, 64, 396), (25344, 396, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
triton_poi_fused_bmm_transpose_5.run(buf10, buf11, buf14, 25344, grid=grid(25344), stream=stream0)
del buf10
buf12 = empty_strided_cuda((1, 8, 64), (512, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (1, 8, 396), (0, 396, 1), 0), buf11, out=buf12)
del buf11
buf13 = reinterpret_tensor(buf12, (4, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf13, 512, grid=grid(512), stream=stream0)
return (buf13, buf1, buf3, buf5, buf6, buf7, buf8, reinterpret_tensor(buf9, (1, 396, 8), (3168, 1, 396), 0), buf14, reinterpret_tensor(primals_5, (20, 4), (1, 20), 0), reinterpret_tensor(primals_8, (396, 20), (1, 396), 0), reinterpret_tensor(primals_7, (396, 20), (1, 396), 0), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0), reinterpret_tensor(buf4, (4, 8), (1, 4), 0), reinterpret_tensor(buf2, (4, 8), (1, 4), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class HexaLinearScore(nn.Module):
"""
Outer product version of hexalinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super(HexaLinearScore, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size),
            b=math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T1, std=self.std)
nn.init.normal_(self.T2, std=self.std)
nn.init.normal_(self.T3, std=self.std)
nn.init.normal_(self.W1, std=self.std)
nn.init.normal_(self.W2, std=self.std)
nn.init.normal_(self.W3, std=self.std)
def forward(self, word_emb):
"""
Args:
word_emb: [batch, sent_length, wemb_size]
Returns: Tensor
            [batch, sent_length-2, tagset_size, tagset_size, tagset_size]
"""
        assert word_emb.size(2
            ) == self.wemb_size, 'the last dimension of word_emb is required to equal wemb_size.'
g1 = torch.matmul(word_emb[:, :-2], self.W1)
g2 = torch.matmul(word_emb[:, 1:-1], self.W2)
g3 = torch.matmul(word_emb[:, 2:], self.W3)
g4 = torch.matmul(self.tag_emd, self.T1)
g5 = torch.matmul(self.tag_emd, self.T2)
g6 = torch.matmul(self.tag_emd, self.T3)
temp01 = g1 * g2 * g3
temp02 = torch.einsum('ak,bk,ck->abck', [g4, g5, g6])
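        # the einsum below contracts the shared rank index k:
        # score[n, m, a, b, c] = sum_k temp01[n, m, k] * temp02[a, b, c, k]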
score = torch.einsum('nmk,abck->nmabc', [temp01, temp02])
if self.normalization:
score = score / math.sqrt(self.rank)
return score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'wemb_size': 4, 'tagset_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
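# The next three kernels materialize the shifted slices word_emb[:, :-2],
# word_emb[:, 1:-1] and word_emb[:, 2:] as contiguous (8, 4) matrices for the
# mm calls that follow; they differ only in a base offset of 0, 4 or 8, i.e.
# a shift of one sequence position each.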
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
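# Elementwise triple product temp01 = g1 * g2 * g3 over the flattened
# (8, 396) word projections, stored as a (4, 2, 396) buffer.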
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3168
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
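# Expands the tag einsum 'ak,bk,ck->abck': out[a, b, c, k] =
# g4[a, k] * g5[b, k] * g6[c, k], written into a (4, 4, 4, 396) buffer whose
# inner (c, k) block of 4*396 = 1584 elements is padded to a stride of 1600
# for alignment.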
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex % 1584
x0 = xindex % 396
x3 = xindex // 6336
x2 = xindex // 1584 % 4
x4 = xindex // 1584
tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 396 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 396 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x5 + 1600 * x4), tmp4, xmask)
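# Strips the 1600-element padding and writes the dense values twice: once
# laid out as the (396, 64) bmm operand, once as the (64, 396) transpose
# saved for the backward pass.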
@triton.jit
def triton_poi_fused_bmm_transpose_5(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25344
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x1 = xindex // 396
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 396 * (x1 % 4) + 1600 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
tl.store(out_ptr1 + x2, tmp0, xmask)
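# Score normalization: multiplies in place by 0.050251890762960605 =
# 1/sqrt(396) = 1/sqrt(rank), with the division folded into a multiply.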
@triton.jit
def triton_poi_fused_div_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.050251890762960605
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
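# Driver: six cuBLAS mm calls produce the word- and tag-side projections, the
# Triton kernels above build the two einsum operands, and a single bmm
# contracts the shared rank dimension before the 1/sqrt(rank) scaling.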
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 396), (396, 1))
assert_size_stride(primals_3, (4, 396), (396, 1))
assert_size_stride(primals_4, (4, 396), (396, 1))
assert_size_stride(primals_5, (4, 20), (20, 1))
assert_size_stride(primals_6, (20, 396), (396, 1))
assert_size_stride(primals_7, (20, 396), (396, 1))
assert_size_stride(primals_8, (20, 396), (396, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(32)](primals_1, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(32)](primals_1, buf2, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
extern_kernels.mm(buf2, primals_3, out=buf3)
del primals_3
buf4 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_2[grid(32)](primals_1, buf4, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf5 = empty_strided_cuda((8, 396), (396, 1), torch.float32)
extern_kernels.mm(buf4, primals_4, out=buf5)
del primals_4
buf6 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_5, primals_6, out=buf6)
buf7 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_5, primals_7, out=buf7)
buf8 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_5, primals_8, out=buf8)
buf9 = empty_strided_cuda((4, 2, 396), (792, 396, 1), torch.float32)
triton_poi_fused_mul_3[grid(3168)](buf1, buf3, buf5, buf9, 3168,
XBLOCK=128, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4, 396), (6400, 1600, 396, 1),
torch.float32)
triton_poi_fused_mul_4[grid(25344)](buf8, buf6, buf7, buf10, 25344,
XBLOCK=128, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((1, 396, 64), (25344, 1, 396), torch.float32
)
buf14 = empty_strided_cuda((1, 64, 396), (25344, 396, 1), torch.float32
)
triton_poi_fused_bmm_transpose_5[grid(25344)](buf10, buf11, buf14,
25344, XBLOCK=128, num_warps=4, num_stages=1)
del buf10
buf12 = empty_strided_cuda((1, 8, 64), (512, 64, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf9, (1, 8, 396), (0, 396, 1
), 0), buf11, out=buf12)
del buf11
buf13 = reinterpret_tensor(buf12, (4, 2, 4, 4, 4), (128, 64, 16, 4,
1), 0)
del buf12
triton_poi_fused_div_6[grid(512)](buf13, 512, XBLOCK=256, num_warps
=4, num_stages=1)
return buf13, buf1, buf3, buf5, buf6, buf7, buf8, reinterpret_tensor(buf9,
(1, 396, 8), (3168, 1, 396), 0), buf14, reinterpret_tensor(primals_5,
(20, 4), (1, 20), 0), reinterpret_tensor(primals_8, (396, 20), (1,
396), 0), reinterpret_tensor(primals_7, (396, 20), (1, 396), 0
), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0
), reinterpret_tensor(buf4, (4, 8), (1, 4), 0), reinterpret_tensor(buf2
, (4, 8), (1, 4), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0)
class HexaLinearScoreNew(nn.Module):
"""
Outer product version of hexalinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super(HexaLinearScoreNew, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=
            math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T1, std=self.std)
nn.init.normal_(self.T2, std=self.std)
nn.init.normal_(self.T3, std=self.std)
nn.init.normal_(self.W1, std=self.std)
nn.init.normal_(self.W2, std=self.std)
nn.init.normal_(self.W3, std=self.std)
def forward(self, input_0):
primals_5 = self.tag_emd
primals_2 = self.W1
primals_3 = self.W2
primals_4 = self.W3
primals_6 = self.T1
primals_7 = self.T2
primals_8 = self.T3
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
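# Minimal usage sketch (illustrative, not part of the generated module; it
# assumes a CUDA device, since `call` allocates CUDA buffers). With the default
# rank=396 and a [4, 4, 4] input, the returned score tensor has shape
# [batch, sent_length - 2, tagset, tagset, tagset] = [4, 2, 4, 4, 4]:
#
#     model = HexaLinearScoreNew(wemb_size=4, tagset_size=4).cuda()
#     score = model(torch.rand(4, 4, 4, device='cuda'))
#     assert score.shape == (4, 2, 4, 4, 4)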
| ciaochiaociao/CLNER | HexaLinearScore | false | 3,378 | [
"MIT"
] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import math
import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class Model(nn.Module):
"""
Outer product version of hexalinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super().__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=
            math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T1, std=self.std)
nn.init.normal_(self.T2, std=self.std)
nn.init.normal_(self.T3, std=self.std)
nn.init.normal_(self.W1, std=self.std)
nn.init.normal_(self.W2, std=self.std)
nn.init.normal_(self.W3, std=self.std)
def forward(self, word_emb):
"""
Args:
word_emb: [batch, sent_length, wemb_size]
Returns: Tensor
            [batch, sent_length-2, tagset_size, tagset_size, tagset_size]
"""
assert word_emb.size(2
            ) == self.wemb_size, 'batch sizes of encoder and decoder are required to be equal.'
g1 = torch.matmul(word_emb[:, :-2], self.W1)
g2 = torch.matmul(word_emb[:, 1:-1], self.W2)
g3 = torch.matmul(word_emb[:, 2:], self.W3)
g4 = torch.matmul(self.tag_emd, self.T1)
g5 = torch.matmul(self.tag_emd, self.T2)
g6 = torch.matmul(self.tag_emd, self.T3)
temp01 = g1 * g2 * g3
temp02 = torch.einsum('ak,bk,ck->abck', [g4, g5, g6])
score = torch.einsum('nmk,abck->nmabc', [temp01, temp02])
if self.normalization:
score = score / math.sqrt(self.rank)
return score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
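# Illustrative sanity check (a sketch, not part of the original repo): the two
# einsums in Model.forward factor a rank-decomposed six-way contraction, so a
# single direct einsum over all six factors must agree with the staged result.
def _check_hexalinear_factorization():
    torch.manual_seed(0)
    model = Model(wemb_size=5, tagset_size=3, rank=7, normalization=False)
    x = torch.rand(2, 6, 5)
    g1 = x[:, :-2] @ model.W1
    g2 = x[:, 1:-1] @ model.W2
    g3 = x[:, 2:] @ model.W3
    g4 = model.tag_emd @ model.T1
    g5 = model.tag_emd @ model.T2
    g6 = model.tag_emd @ model.T3
    direct = torch.einsum('nmk,nmk,nmk,ak,bk,ck->nmabc', g1, g2, g3, g4, g5, g6)
    assert torch.allclose(model(x), direct, atol=1e-5)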
|
QuadriLinearScore | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/v3/cv3ybrq4mz3m4ysotv2pijcf4hwcak3rq4zzuh4c7ra25ls6d2f2.py
# Topologically Sorted Source Nodes: [g0], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# g0 => clone, view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [12, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 3)) + (16*(x1 // 3))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uy/cuyr6zgmv4trebzrxzaaydiawo5g5pt6xgkahl4nfpc4kmvsr4wb.py
# Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# g1 => clone_1, view_2
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format})
# %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_1, [12, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_1 = async_compile.triton('triton_poi_fused__unsafe_view_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (4*(x1 % 3)) + (16*(x1 // 3))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ae/cae5qllyrn5trsejvmxiqy5ctiy3vsb3lfas6nhktsovuegggrry.py
# Topologically Sorted Source Nodes: [temp012], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# temp012 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %permute_1), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x2 = (xindex // 1584)
x3 = xindex % 1584
tmp0 = tl.load(in_ptr0 + (x0 + (396*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (396*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x3 + (1600*x2)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ts/ctstdurw7bc4bbvnxunt3mgwrvxsegqqit5fbbae7olo7trhm3vv.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
# Source node to ATen node mapping:
# score => bmm
# Graph fragment:
# %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_4, %view_5), kwargs = {})
# %permute_8 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_4, [0, 2, 1]), kwargs = {})
triton_poi_fused_bmm_transpose_3 = async_compile.triton('triton_poi_fused_bmm_transpose_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_transpose_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x1 = (xindex // 396)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (396*(x1 % 4)) + (1600*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
tl.store(out_ptr1 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/e2/ce2vbtsfqo5opo5oc54myuy2kkysz6rz4zvpqo3jrsw43ldfwvz4.py
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# score_1 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, 19.8997487421324), kwargs = {})
triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.050251890762960605
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 396), (396, 1))
assert_size_stride(primals_3, (4, 396), (396, 1))
assert_size_stride(primals_4, (4, 20), (20, 1))
assert_size_stride(primals_5, (20, 396), (396, 1))
assert_size_stride(primals_6, (20, 396), (396, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g0], Original ATen: [aten.clone, aten._unsafe_view]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 48, grid=grid(48), stream=stream0)
buf1 = empty_strided_cuda((12, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g0], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view]
triton_poi_fused__unsafe_view_clone_1.run(primals_1, buf2, 48, grid=grid(48), stream=stream0)
del primals_1
buf3 = empty_strided_cuda((12, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_3, out=buf3)
del primals_3
buf4 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g2], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
# Topologically Sorted Source Nodes: [g3], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, primals_6, out=buf5)
buf6 = empty_strided_cuda((4, 3, 4, 396), (4800, 1600, 396, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp012], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf1, buf3, buf4, buf6, 19008, grid=grid(19008), stream=stream0)
buf7 = empty_strided_cuda((1, 48, 396), (19008, 396, 1), torch.float32)
buf10 = empty_strided_cuda((1, 396, 48), (19008, 1, 396), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
triton_poi_fused_bmm_transpose_3.run(buf6, buf7, buf10, 19008, grid=grid(19008), stream=stream0)
del buf6
buf8 = empty_strided_cuda((1, 48, 4), (192, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf5, (1, 396, 4), (0, 1, 396), 0), out=buf8)
del buf7
buf9 = reinterpret_tensor(buf8, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div]
triton_poi_fused_div_4.run(buf9, 192, grid=grid(192), stream=stream0)
return (buf9, buf1, buf3, buf4, buf10, reinterpret_tensor(buf5, (1, 4, 396), (396, 396, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0), reinterpret_tensor(primals_5, (396, 20), (1, 396), 0), reinterpret_tensor(buf2, (4, 12), (1, 4), 0), reinterpret_tensor(buf0, (4, 12), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class QuadriLinearScore(nn.Module):
"""
Outer product version of quadrilinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, window_size=1, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super(QuadriLinearScore, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.window_size = window_size
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=
            math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T, std=self.std)
nn.init.normal_(self.U, std=self.std)
nn.init.normal_(self.V, std=self.std)
nn.init.normal_(self.W, std=self.std)
def forward(self, word_emb):
"""
Args:
word_emb: [batch, sent_length, wemb_size]
Returns: Tensor
[batch, sent_length-window_size, tagset_size, tagset_size]
"""
assert word_emb.size(2
            ) == self.wemb_size, 'batch sizes of encoder and decoder are required to be equal.'
g0 = torch.matmul(word_emb[:, :-self.window_size], self.U)
g1 = torch.matmul(word_emb[:, self.window_size:], self.T)
g2 = torch.matmul(self.tag_emd, self.V)
g3 = torch.matmul(self.tag_emd, self.W)
temp01 = g0 * g1
temp012 = torch.einsum('nak,bk->nabk', [temp01, g2])
score = torch.einsum('nabk,ck->nabc', [temp012, g3])
if self.normalization:
score = score / math.sqrt(self.rank)
return score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'wemb_size': 4, 'tagset_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 3) + 16 * (x1 // 3)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
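    # the +4 base offset selects the shifted window word_emb[:, 1:]
    # (window_size=1), flattened to (12, 4) for the following mm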
tmp0 = tl.load(in_ptr0 + (4 + x0 + 4 * (x1 % 3) + 16 * (x1 // 3)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 19008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x2 = xindex // 1584
x3 = xindex % 1584
tmp0 = tl.load(in_ptr0 + (x0 + 396 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 396 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x3 + 1600 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_bmm_transpose_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 19008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 396
x1 = xindex // 396
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 396 * (x1 % 4) + 1600 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
tl.store(out_ptr1 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
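    # 0.050251890762960605 == 1 / sqrt(396), i.e. the score / sqrt(rank)
    # normalization with the default rank=396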
tmp1 = 0.050251890762960605
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 396), (396, 1))
assert_size_stride(primals_3, (4, 396), (396, 1))
assert_size_stride(primals_4, (4, 20), (20, 1))
assert_size_stride(primals_5, (20, 396), (396, 1))
assert_size_stride(primals_6, (20, 396), (396, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(48)](primals_1, buf0, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((12, 396), (396, 1), torch.float32)
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(48)](primals_1, buf2, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((12, 396), (396, 1), torch.float32)
extern_kernels.mm(buf2, primals_3, out=buf3)
del primals_3
buf4 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_4, primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 396), (396, 1), torch.float32)
extern_kernels.mm(primals_4, primals_6, out=buf5)
buf6 = empty_strided_cuda((4, 3, 4, 396), (4800, 1600, 396, 1),
torch.float32)
triton_poi_fused_mul_2[grid(19008)](buf1, buf3, buf4, buf6, 19008,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((1, 48, 396), (19008, 396, 1), torch.float32)
buf10 = empty_strided_cuda((1, 396, 48), (19008, 1, 396), torch.float32
)
triton_poi_fused_bmm_transpose_3[grid(19008)](buf6, buf7, buf10,
19008, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
buf8 = empty_strided_cuda((1, 48, 4), (192, 4, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf5, (1, 396, 4), (0,
1, 396), 0), out=buf8)
del buf7
buf9 = reinterpret_tensor(buf8, (4, 3, 4, 4), (48, 16, 4, 1), 0)
del buf8
triton_poi_fused_div_4[grid(192)](buf9, 192, XBLOCK=256, num_warps=
4, num_stages=1)
return buf9, buf1, buf3, buf4, buf10, reinterpret_tensor(buf5, (1, 4,
396), (396, 396, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1,
20), 0), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0
), reinterpret_tensor(primals_5, (396, 20), (1, 396), 0
), reinterpret_tensor(buf2, (4, 12), (1, 4), 0), reinterpret_tensor(
buf0, (4, 12), (1, 4), 0)
class QuadriLinearScoreNew(nn.Module):
"""
Outer product version of quadrilinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, window_size=1, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super(QuadriLinearScoreNew, self).__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.window_size = window_size
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=
            math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T, std=self.std)
nn.init.normal_(self.U, std=self.std)
nn.init.normal_(self.V, std=self.std)
nn.init.normal_(self.W, std=self.std)
def forward(self, input_0):
primals_4 = self.tag_emd
primals_2 = self.T
primals_3 = self.U
primals_5 = self.V
primals_6 = self.W
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
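# Minimal usage sketch (illustrative, not part of the generated module; it
# assumes a CUDA device, since `call` allocates CUDA buffers). With the
# defaults (window_size=1, rank=396) and a [4, 4, 4] input, the score tensor
# has shape [batch, sent_length - 1, tagset, tagset] = [4, 3, 4, 4]:
#
#     model = QuadriLinearScoreNew(wemb_size=4, tagset_size=4).cuda()
#     score = model(torch.rand(4, 4, 4, device='cuda'))
#     assert score.shape == (4, 3, 4, 4)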
| ciaochiaociao/CLNER | QuadriLinearScore | false | 3,379 | [
"MIT"
] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import math
import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class Model(nn.Module):
"""
Outer product version of quadrilinear function for sequence labeling.
"""
def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=
0.1545, window_size=1, normalization=True, **kwargs):
"""
Args:
wemb_size: word embedding hidden size
tagset_size: tag set size
temb_size: tag embedding size
rank: rank of the weight tensor
std: standard deviation of the tensor
"""
super().__init__()
self.wemb_size = wemb_size
self.tagset_size = tagset_size
self.temb_size = temb_size
self.rank = rank
self.std = std
self.window_size = window_size
self.normalization = normalization
self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.
temb_size))
self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
self.rand_init()
def rand_init(self):
"""random initialization
"""
        nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=
            math.sqrt(6 / self.temb_size))
nn.init.normal_(self.T, std=self.std)
nn.init.normal_(self.U, std=self.std)
nn.init.normal_(self.V, std=self.std)
nn.init.normal_(self.W, std=self.std)
def forward(self, word_emb):
"""
Args:
word_emb: [batch, sent_length, wemb_size]
Returns: Tensor
[batch, sent_length-window_size, tagset_size, tagset_size]
"""
assert word_emb.size(2
            ) == self.wemb_size, 'batch sizes of encoder and decoder are required to be equal.'
g0 = torch.matmul(word_emb[:, :-self.window_size], self.U)
g1 = torch.matmul(word_emb[:, self.window_size:], self.T)
g2 = torch.matmul(self.tag_emd, self.V)
g3 = torch.matmul(self.tag_emd, self.W)
temp01 = g0 * g1
temp012 = torch.einsum('nak,bk->nabk', [temp01, g2])
score = torch.einsum('nabk,ck->nabc', [temp012, g3])
if self.normalization:
score = score / math.sqrt(self.rank)
return score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
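# Illustrative sanity check (a sketch, not part of the original repo): the
# chained einsums in Model.forward factor a rank-decomposed four-way
# contraction, so a single direct einsum over all four factors must agree.
def _check_quadrilinear_factorization():
    torch.manual_seed(0)
    model = Model(wemb_size=5, tagset_size=3, rank=7, normalization=False)
    x = torch.rand(2, 6, 5)
    g0 = x[:, :-1] @ model.U
    g1 = x[:, 1:] @ model.T
    g2 = model.tag_emd @ model.V
    g3 = model.tag_emd @ model.W
    direct = torch.einsum('nak,nak,bk,ck->nabc', g0, g1, g2, g3)
    assert torch.allclose(model(x), direct, atol=1e-5)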
|
MaxMinGroup | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6p/c6p6sxn6eyjkfrrvgdkwlhviqvdquvcktva3443bsgc22ofn2dk7.py
# Topologically Sorted Source Nodes: [maxmin], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# maxmin => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + ((4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (1 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tl.load(in_ptr0 + (2 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (3 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x3), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [maxmin], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class MaxMinGroup(nn.Module):
def __init__(self, group_size, axis=-1):
super(MaxMinGroup, self).__init__()
self.group_size = group_size
self.axis = axis
def forward(self, x):
maxes = maxout_by_group(x, self.group_size, self.axis)
mins = minout_by_group(x, self.group_size, self.axis)
maxmin = torch.cat((maxes, mins), dim=1)
return maxmin
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'group_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
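    # output channels 0-3 hold the per-group max, channels 4-7 the per-group
    # min: the fused equivalent of torch.cat((maxes, mins), dim=1)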
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 &
xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * (-4 + x1) + 64 * x2), tmp14 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x3, tmp26, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class MaxMinGroupNew(nn.Module):
def __init__(self, group_size, axis=-1):
super(MaxMinGroupNew, self).__init__()
self.group_size = group_size
self.axis = axis
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| david-klindt/invertible-resnet | MaxMinGroup | false | 3,380 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class Model(nn.Module):
def __init__(self, group_size, axis=-1):
super().__init__()
self.group_size = group_size
self.axis = axis
def forward(self, x):
maxes = maxout_by_group(x, self.group_size, self.axis)
mins = minout_by_group(x, self.group_size, self.axis)
maxmin = torch.cat((maxes, mins), dim=1)
return maxmin
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
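# Illustrative shape walk-through (a sketch, not part of the original repo):
# with group_size=4 and the default axis=-1, a [4, 4, 4, 4] input is viewed as
# [4, 4, 4, 1, 4]; max and min over the trailing group each give [4, 4, 4, 1],
# and the concatenation on dim=1 yields [4, 8, 4, 1].
#
#     m = Model(group_size=4)
#     out = m(torch.rand(4, 4, 4, 4))
#     assert out.shape == (4, 8, 4, 1)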
|
ResidualDenseBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ry/cryiobypkawkmdldevamc2xgthshbnrm6is6ppwpjekkiy25lx7z.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
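    # output channels 0-3 copy the block input unchanged; channels 4-7 add the
    # per-channel conv bias and apply LeakyReLU(negative_slope=0.2) before the
    # concatenation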
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + (x3), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pd/cpd4meipbe2zv2hwyhb6cjpvxof6tzs5i6jeu6dqinxcb26whgcv.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp20 & xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + ((-8) + x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + (x3), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3c/c3cglho5wyr4b4bwpkwiar5a4l3ewyyzusu3gf2a2qlomqutylik.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-8) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr5 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp32 & xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + ((-12) + x1), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + (x3), tmp45, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/la/clagauvrhsnsmt2s2mrzhhfmw6omixx6owqe7iwdjdtls52tkc5v.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2, %where_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 20
x0 = xindex % 16
x2 = (xindex // 320)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-8) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp35 & xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + ((-12) + x1), tmp35 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tmp45 = tl.full([1], 20, tl.int64)
tmp46 = tmp0 < tmp45
tmp47 = tl.load(in_ptr7 + (x0 + (16*((-16) + x1)) + (64*x2)), tmp44 & xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + ((-16) + x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + (x3), tmp58, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/iu/ciubqy3qr45bvfhdurk7hjkhteasgy5mlezftic2idhmf7qyh6ar.py
# Topologically Sorted Source Nodes: [out5, mul, out], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_4
# out => add
# out5 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_mul_4 = async_compile.triton('triton_poi_fused_add_convolution_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
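# Reference sketch (illustrative): this kernel finishes the residual block
# in-place on the conv5 output buffer, fusing the bias add, the 0.2 residual
# scaling, and the skip connection:
def _reference_residual_tail(conv5_out, conv5_bias, identity):
    return (conv5_out + conv5_bias[None, :, None, None]) * 0.2 + identity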
# kernel path: runs/run_shard_7/inductor_cache/s5/cs5o65bd5qls62jyofgsrvlsxw6ku55z7wudf4ziucds3lbj4c7j.py
# Topologically Sorted Source Nodes: [conv2d_3, out4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# out4 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
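# Reference sketch (illustrative): despite the "leaky_relu_backward" name, this
# kernel computes no gradients. It recomputes conv_out + bias, applies
# LeakyReLU(0.2), and stores the boolean mask that autograd consumes later:
def _reference_lrelu_mask(conv_out, bias):
    import torch.nn.functional as F
    return F.leaky_relu(conv_out + bias[None, :, None, None], 0.2) > 0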
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, primals_3, buf1, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf0, primals_3, buf2, primals_5, buf3, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, buf0, primals_3, buf2, primals_5, buf4, primals_7, buf5, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_1, buf0, primals_3, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 1280, grid=grid(1280), stream=stream0)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out5, mul, out], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_4.run(buf9, primals_11, primals_1, 256, grid=grid(256), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, out4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf6, primals_9, buf10, 256, grid=grid(256), stream=stream0)
del buf6
del primals_9
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, out3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf4, primals_7, buf11, 256, grid=grid(256), stream=stream0)
del buf4
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, out2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf2, primals_5, buf12, 256, grid=grid(256), stream=stream0)
del buf2
del primals_5
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, out1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf0, primals_3, buf13, 256, grid=grid(256), stream=stream0)
del buf0
del primals_3
return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn as nn
class ResidualDenseBlock(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super(ResidualDenseBlock, self).__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, x: 'Tensor') ->Tensor:
identity = x
out1 = self.leaky_relu(self.conv1(x))
out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3,
out4], 1)))
out = out5 * 0.2 + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'growths': 4}]
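def _example():
    # Usage sketch (illustrative): the residual path keeps the channel count,
    # so the output shape matches the input shape.
    block = ResidualDenseBlock(channels=4, growths=4)
    x = torch.rand([4, 4, 4, 4])
    y = block(x)
    assert y.shape == x.shape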
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp23 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp20 &
xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + (-8 + x1), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + x3, tmp32, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-8 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tl.full([1], 16, tl.int64)
tmp35 = tl.load(in_ptr5 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp32 &
xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + (-12 + x1), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 20
x0 = xindex % 16
x2 = xindex // 320
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-8 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp35 &
xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + (-12 + x1), tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tl.full([1], 20, tl.int64)
tmp47 = tl.load(in_ptr7 + (x0 + 16 * (-16 + x1) + 64 * x2), tmp44 &
xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + (-16 + x1), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + x3, tmp58, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, primals_3, buf1,
512, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_1[grid(768)](primals_1, buf0, primals_3, buf2,
primals_5, buf3, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_2[grid(1024)](primals_1, buf0, primals_3, buf2,
primals_5, buf4, primals_7, buf5, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_3[grid(1280)](primals_1, buf0, primals_3, buf2,
primals_5, buf4, primals_7, buf6, primals_9, buf7, 1280, XBLOCK
=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_add_convolution_mul_4[grid(256)](buf9, primals_11,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf6, primals_9, buf10, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_9
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf4, primals_7, buf11, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf4
del primals_7
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf2, primals_5, buf12, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del primals_5
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf0, primals_3, buf13, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_3
return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13)
class ResidualDenseBlockNew(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super(ResidualDenseBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
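def _example():
    # Usage sketch (illustrative, requires CUDA): the weights still live on the
    # eager nn.Conv2d submodules; `call` runs the fused Triton kernels.
    m = ResidualDenseBlockNew(channels=4, growths=4).cuda()
    y = m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert y.shape == (4, 4, 4, 4)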
| cyun-404/PieESRGAN | ResidualDenseBlock | false | 3,381 | [
"Apache-2.0"
] | 0 | 22ffe683bf2389b646429494d1bc88e61a9d72c5 | https://github.com/cyun-404/PieESRGAN/tree/22ffe683bf2389b646429494d1bc88e61a9d72c5 | import torch
from torch import Tensor
import torch.nn as nn
class Model(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super().__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, x: 'Tensor') ->Tensor:
identity = x
out1 = self.leaky_relu(self.conv1(x))
out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3,
out4], 1)))
out = out5 * 0.2 + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
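# Channel bookkeeping sketch (illustrative): each dense layer concatenates its
# input with all previous activations, so conv_k sees channels + growths*(k-1)
# input channels -- 4, 8, 12, 16, 20 here, matching the weight shapes asserted
# in the compiled call() above.
def _dense_in_channels(channels=4, growths=4):
    return [channels + growths * k for k in range(5)]  # [4, 8, 12, 16, 20]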
|
Split | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cp/ccplpqa4hjz3n3j5jujs47gyz26ndm5naxvwku5nkktkkpfuh4um.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ie/cie6ockbfnni22s6gh6b3qk2guxsbx7g4f3xzhpygcbbqvw47fbq.py
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x2 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
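# Reference sketch (illustrative): the two clone kernels materialize the
# contiguous halves of the (4, 4, 4, 4) input along dim 1, i.e.
# x[:, :2].contiguous() and x[:, 2:].contiguous():
def _reference_split(x):
    n = x.size(1) // 2
    return x[:, :n].contiguous(), x[:, n:].contiguous()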
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg0_1, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Split(nn.Module):
def __init__(self):
super(Split, self).__init__()
def forward(self, x):
n = int(x.size(1) / 2)
x1 = x[:, :n, :, :].contiguous()
x2 = x[:, n:, :, :].contiguous()
return x1, x2
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
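def _example():
    # Round-trip sketch (illustrative): inverse concatenates the halves back.
    s = Split()
    x = torch.rand([4, 4, 4, 4])
    x1, x2 = s(x)
    assert x1.shape == (4, 2, 4, 4)
    assert torch.equal(s.inverse(x1, x2), x)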
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(128)](arg0_1, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0, buf1
class SplitNew(nn.Module):
def __init__(self):
super(SplitNew, self).__init__()
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
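def _example():
    # Sketch (illustrative, requires CUDA): the Triton path matches plain
    # channel slicing.
    s = SplitNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    x1, x2 = s(x)
    assert torch.equal(x1, x[:, :2]) and torch.equal(x2, x[:, 2:])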
| david-klindt/invertible-resnet | Split | false | 3,382 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
n = int(x.size(1) / 2)
x1 = x[:, :n, :, :].contiguous()
x2 = x[:, n:, :, :].contiguous()
return x1, x2
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ActNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nz/cnzkzycmku6msmsiehonl6lvkxkalhor6qvech326gpy2lqc3hoy.py
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# logdet => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%unsqueeze,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None)
''', device_str='cuda')
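# Reference sketch (illustrative): this persistent reduction is just the
# log-determinant term of ActNorm, summed over the 4-element parameter vector:
def _reference_logdet(log_scale):
    return log_scale.sum()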
# kernel path: runs/run_shard_7/inductor_cache/w6/cw6fwvvd65omur5x74r5pig6wm7q3m5ho7q4pfmcjdleaujxnwkx.py
# Topologically Sorted Source Nodes: [exp, mul, add], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul => mul
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%unsqueeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
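# Reference sketch (illustrative): an elementwise affine map. Note that the
# 4-element parameter vectors broadcast along the trailing dimension of the
# 4D input, mirroring the eager code's `[None, :]` indexing:
def _reference_actnorm(x, log_scale, shift):
    return x * torch.exp(log_scale) + shift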
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(primals_1, buf0, 1, 4, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul, add], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_1.run(primals_2, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class ActNorm(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
mean = torch.transpose(x, 0, 1).contiguous().view(self.
num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum()
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
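def _example():
    # Usage sketch (illustrative): the first forward pass initializes
    # scale/shift from batch statistics; afterwards ActNorm is a fixed affine
    # map whose inverse recovers the input up to float rounding.
    norm = ActNorm(num_channels=4)
    x = torch.rand([4, 4, 4, 4])
    y, logdet = norm(x)
    assert torch.allclose(norm.inverse(y), x, atol=1e-05)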
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](primals_1, buf0, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1,
primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, buf0, primals_1, primals_2
class ActNormNew(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNormNew, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def forward(self, input_0):
primals_1 = self._log_scale
primals_3 = self._shift
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| david-klindt/invertible-resnet | ActNorm | false | 3,383 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
mean = torch.transpose(x, 0, 1).contiguous().view(self.
num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum()
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
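A minimal usage sketch for the ActNorm record above (illustrative, not part of
the dump; it assumes the ActNorm class definition from the python_code field is
in scope): the first forward pass performs the data-dependent init, and
inverse() should undo forward() up to floating-point error.

import torch

norm = ActNorm(num_channels=4)
x = torch.randn(8, 4)                 # 2-D input: (batch, channels)
y, logdet = norm(x)                   # first call triggers data-dependent init
x_rec = norm.inverse(y)
print(torch.allclose(x, x_rec, atol=1e-05), logdet.item())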
BiaffineAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qt/cqtn54x53z5e5qza53u36hhsftfflvdqwoao2xnj7xnw5ocdcx3f.py
# Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# output_2 => add_2
# output_3 => amax
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %view_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %permute_3), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_10), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [1], True), kwargs = {})
triton_poi_fused__softmax_add_0 = async_compile.triton('triton_poi_fused__softmax_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp5 = tl.load(in_ptr3 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp10 = tmp8 + tmp9
tmp11 = tmp10 + tmp3
tmp12 = tmp11 + tmp6
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 + tmp3
tmp18 = tmp17 + tmp6
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp22 = tmp20 + tmp21
tmp23 = tmp22 + tmp3
tmp24 = tmp23 + tmp6
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tl.store(out_ptr0 + (x0), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ia/ciady5wligt2qz2veuk2j2mq4euwgkz2msjneq6me7apizr4flue.py
# Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# output_2 => add_2
# output_3 => exp, sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %view_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %permute_3), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_10), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tl.store(in_out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# output_3 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (150, 4), (4, 1))
assert_size_stride(primals_4, (150, ), (1, ))
assert_size_stride(primals_5, (150, 4), (4, 1))
assert_size_stride(primals_6, (150, ), (1, ))
assert_size_stride(primals_7, (1, 150), (150, 1))
assert_size_stride(primals_8, (1, 150), (150, 1))
assert_size_stride(primals_9, (1, 150, 150), (22500, 150, 1))
assert_size_stride(primals_10, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 150), (150, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_s], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor(primals_3, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 150), (150, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_t], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(primals_7, reinterpret_tensor(buf0, (150, 4), (1, 150), 0), out=buf2)
buf3 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_d], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_8, (1, 1, 150), (150, 150, 1), 0), reinterpret_tensor(buf1, (1, 150, 16), (0, 1, 150), 0), out=buf3)
buf4 = empty_strided_cuda((1, 4, 150), (600, 150, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 4, 150), (600, 150, 1), 0), primals_9, out=buf4)
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 150), (150, 150, 1), 0), reinterpret_tensor(buf1, (4, 150, 4), (600, 1, 150), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_add_0.run(buf5, buf3, buf2, primals_10, buf6, 4, grid=grid(4), stream=stream0)
buf7 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_1.run(buf7, buf5, buf2, primals_10, buf6, 16, grid=grid(16), stream=stream0)
del buf2
del buf6
del primals_10
buf8 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf7, buf8, 16, grid=grid(16), stream=stream0)
del buf7
return (buf8, primals_1, primals_7, buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf1, buf8, reinterpret_tensor(buf4, (4, 150, 1), (150, 1, 150), 0), reinterpret_tensor(primals_9, (1, 150, 150), (22500, 1, 150), 0), reinterpret_tensor(primals_8, (1, 150, 1), (150, 1, 150), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((150, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((150, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((150, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((150, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 150), (150, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 150), (150, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 150, 150), (22500, 150, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.nn
class BiaffineAttention(nn.Module):
"""
    Adapted from NeuroNLP2:
https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py
Bi-Affine attention layer.
"""
def __init__(self, input_size_encoder, input_size_decoder, hidden_size=
150, num_labels=1, biaffine=True, **kwargs):
"""
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
                whether to apply the bi-affine parameter.
**kwargs:
"""
super(BiaffineAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self
.hidden_size)
self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self
.hidden_size)
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.b = Parameter(torch.Tensor(1, self.num_labels))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
hidden_size, self.hidden_size))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.W_d)
nn.init.xavier_normal_(self.W_e)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_normal_(self.U)
def forward(self, input_s, input_t, mask_d=None, mask_e=None):
"""
Args:
input_s: Tensor
the student input tensor with shape = [batch, input_size]
input_t: Tensor
the teacher input tensor with shape = [batch, num_teachers, input_size]
mask_d: None
mask_e: None
Returns: Tensor
the energy tensor with shape = [batch, length]
"""
assert input_s.size(0) == input_t.size(0
            ), 'batch sizes of encoder and decoder are required to be equal.'
input_s.size()
_, _num_teachers, _ = input_t.size()
input_s = self.linear_encoder(input_s)
input_t = self.linear_decoder(input_t)
out_e = torch.matmul(self.W_e, input_s.transpose(1, 0)).transpose(1, 0)
out_d = torch.einsum('nd,bnd->bn', self.W_d, input_t)
if self.biaffine:
output = torch.einsum('bd,nde->bne', input_s, self.U)
output = torch.einsum('bne,bne->bn', output, input_t)
output = output + out_d + out_e + self.b
else:
            output = out_d + out_e + self.b
if mask_d is not None and mask_e is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3
) * mask_e.unsqueeze(1).unsqueeze(2)
output = torch.nn.functional.softmax(output, 1)
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_encoder': 4, 'input_size_decoder': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data.dataloader
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp10 = tmp8 + tmp9
tmp11 = tmp10 + tmp3
tmp12 = tmp11 + tmp6
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 + tmp3
tmp18 = tmp17 + tmp6
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp22 = tmp20 + tmp21
tmp23 = tmp22 + tmp3
tmp24 = tmp23 + tmp6
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tl.store(out_ptr0 + x0, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tl.store(in_out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
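# Note on the three kernels above: together they form a numerically stable
# softmax over dim 1 of the (4, 4) energy matrix. Kernel 0 fuses the adds of
# the biaffine term, out_d, out_e, and the bias b with a per-row running max
# (the amax pass); kernel 1 redoes the adds, subtracts that max, and
# exponentiates in place; kernel 2 divides each element by its row sum.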
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (150, 4), (4, 1))
assert_size_stride(primals_4, (150,), (1,))
assert_size_stride(primals_5, (150, 4), (4, 1))
assert_size_stride(primals_6, (150,), (1,))
assert_size_stride(primals_7, (1, 150), (150, 1))
assert_size_stride(primals_8, (1, 150), (150, 1))
assert_size_stride(primals_9, (1, 150, 150), (22500, 150, 1))
assert_size_stride(primals_10, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 150), (150, 1), torch.float32)
extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor(
primals_3, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 150), (150, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 150), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_7, reinterpret_tensor(buf0, (150, 4), (1,
150), 0), out=buf2)
buf3 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_8, (1, 1, 150), (150,
150, 1), 0), reinterpret_tensor(buf1, (1, 150, 16), (0, 1, 150),
0), out=buf3)
buf4 = empty_strided_cuda((1, 4, 150), (600, 150, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 4, 150), (600, 150,
1), 0), primals_9, out=buf4)
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 150), (150, 150,
1), 0), reinterpret_tensor(buf1, (4, 150, 4), (600, 1, 150), 0),
out=buf5)
buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_0[grid(4)](buf5, buf3, buf2,
primals_10, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_add_1[grid(16)](buf7, buf5, buf2,
primals_10, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del buf6
del primals_10
buf8 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(16)](buf7, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf7
return buf8, primals_1, primals_7, buf0, reinterpret_tensor(primals_2,
(16, 4), (4, 1), 0), buf1, buf8, reinterpret_tensor(buf4, (4, 150,
1), (150, 1, 150), 0), reinterpret_tensor(primals_9, (1, 150, 150),
(22500, 1, 150), 0), reinterpret_tensor(primals_8, (1, 150, 1), (
150, 1, 150), 0)
class BiaffineAttentionNew(nn.Module):
"""
    Adapted from NeuroNLP2:
https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py
Bi-Affine attention layer.
"""
def __init__(self, input_size_encoder, input_size_decoder, hidden_size=
150, num_labels=1, biaffine=True, **kwargs):
"""
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
                whether to apply the bi-affine parameter.
**kwargs:
"""
super(BiaffineAttentionNew, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self
.hidden_size)
self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self
.hidden_size)
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.b = Parameter(torch.Tensor(1, self.num_labels))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
hidden_size, self.hidden_size))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.W_d)
nn.init.xavier_normal_(self.W_e)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_normal_(self.U)
def forward(self, input_0, input_1):
primals_7 = self.W_d
primals_8 = self.W_e
primals_10 = self.b
primals_9 = self.U
primals_3 = self.linear_encoder.weight
primals_4 = self.linear_encoder.bias
primals_5 = self.linear_decoder.weight
primals_6 = self.linear_decoder.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| ciaochiaociao/CLNER | BiaffineAttention | false | 3,384 | [
"MIT"
] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.nn
class Model(nn.Module):
"""
    Adapted from NeuroNLP2:
https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py
Bi-Affine attention layer.
"""
def __init__(self, input_size_encoder, input_size_decoder, hidden_size=
150, num_labels=1, biaffine=True, **kwargs):
"""
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
                whether to apply the bi-affine parameter.
**kwargs:
"""
super().__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self
.hidden_size)
self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self
.hidden_size)
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size))
self.b = Parameter(torch.Tensor(1, self.num_labels))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
hidden_size, self.hidden_size))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.W_d)
nn.init.xavier_normal_(self.W_e)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_normal_(self.U)
def forward(self, input_s, input_t, mask_d=None, mask_e=None):
"""
Args:
input_s: Tensor
the student input tensor with shape = [batch, input_size]
input_t: Tensor
the teacher input tensor with shape = [batch, num_teachers, input_size]
mask_d: None
mask_e: None
Returns: Tensor
the energy tensor with shape = [batch, length]
"""
assert input_s.size(0) == input_t.size(0
            ), 'batch sizes of encoder and decoder are required to be equal.'
input_s.size()
_, _num_teachers, _ = input_t.size()
input_s = self.linear_encoder(input_s)
input_t = self.linear_decoder(input_t)
out_e = torch.matmul(self.W_e, input_s.transpose(1, 0)).transpose(1, 0)
out_d = torch.einsum('nd,bnd->bn', self.W_d, input_t)
if self.biaffine:
output = torch.einsum('bd,nde->bne', input_s, self.U)
output = torch.einsum('bne,bne->bn', output, input_t)
output = output + out_d + out_e + self.b
else:
            output = out_d + out_e + self.b
if mask_d is not None and mask_e is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3
) * mask_e.unsqueeze(1).unsqueeze(2)
output = torch.nn.functional.softmax(output, 1)
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
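A compact sketch of the energy the BiaffineAttention record above computes
(illustrative; shapes follow get_inputs with hidden size d = 150, the size-1
num_labels dimension folded out, and s / t standing for the outputs of the
encoder and decoder linear layers):

import torch

b, n, d = 4, 4, 150
s = torch.randn(b, d)                 # student features
t = torch.randn(b, n, d)              # teacher features
U = torch.randn(d, d)
W_d = torch.randn(d)
W_e = torch.randn(d)
bias = torch.zeros(1, 1)
energy = (torch.einsum('bd,de,bne->bn', s, U, t)
          + torch.einsum('d,bnd->bn', W_d, t)
          + (s @ W_e)[:, None]
          + bias)
probs = torch.softmax(energy, dim=1)  # one attention weight per teacher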
VNMaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4b/c4bin6v3nraanry5cu4t7z3qycurd6acdwc4l5azpezbv2erqt7w.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = (xindex // 4)
y0 = yindex % 4
y1 = (yindex // 4)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x3) + (16*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ff/cff4pauyodt3bzn5rggprurywn64qeyzwyhkbgmdbs4wzkmbk24h.py
# Topologically Sorted Source Nodes: [mul, dotprod, max_1], Original ATen: [aten.mul, aten.sum, aten.max]
# Source node to ATen node mapping:
# dotprod => sum_1
# max_1 => max_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %permute_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2], True), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%sum_1, -1), kwargs = {})
triton_poi_fused_max_mul_sum_1 = async_compile.triton('triton_poi_fused_max_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (5 + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (20 + x0 + (64*x1)), xmask)
tmp22 = tl.load(in_ptr0 + (9 + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (24 + x0 + (64*x1)), xmask)
tmp26 = tl.load(in_ptr0 + (13 + (16*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (28 + x0 + (64*x1)), xmask)
tmp45 = tl.load(in_ptr0 + (2 + (16*x2)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp48 = tl.load(in_ptr0 + (6 + (16*x2)), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr1 + (36 + x0 + (64*x1)), xmask)
tmp52 = tl.load(in_ptr0 + (10 + (16*x2)), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (40 + x0 + (64*x1)), xmask)
tmp56 = tl.load(in_ptr0 + (14 + (16*x2)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (44 + x0 + (64*x1)), xmask)
tmp74 = tl.load(in_ptr0 + (3 + (16*x2)), xmask, eviction_policy='evict_last')
tmp75 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp77 = tl.load(in_ptr0 + (7 + (16*x2)), xmask, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr1 + (52 + x0 + (64*x1)), xmask)
tmp81 = tl.load(in_ptr0 + (11 + (16*x2)), xmask, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr1 + (56 + x0 + (64*x1)), xmask)
tmp85 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr1 + (60 + x0 + (64*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 > tmp29
tmp31 = tmp14 == tmp29
tmp32 = tmp14 != tmp14
tmp33 = tmp29 != tmp29
tmp34 = tmp32 > tmp33
tmp35 = tmp30 | tmp34
tmp36 = tmp32 & tmp33
tmp37 = tmp31 | tmp36
tmp38 = tl.full([1], 0, tl.int64)
tmp39 = tl.full([1], 1, tl.int64)
tmp40 = tmp38 < tmp39
tmp41 = tmp37 & tmp40
tmp42 = tmp35 | tmp41
tmp43 = tl.where(tmp42, tmp14, tmp29)
tmp44 = tl.where(tmp42, tmp38, tmp39)
tmp47 = tmp45 * tmp46
tmp50 = tmp48 * tmp49
tmp51 = tmp47 + tmp50
tmp54 = tmp52 * tmp53
tmp55 = tmp51 + tmp54
tmp58 = tmp56 * tmp57
tmp59 = tmp55 + tmp58
tmp60 = tmp43 > tmp59
tmp61 = tmp43 == tmp59
tmp62 = tmp43 != tmp43
tmp63 = tmp59 != tmp59
tmp64 = tmp62 > tmp63
tmp65 = tmp60 | tmp64
tmp66 = tmp62 & tmp63
tmp67 = tmp61 | tmp66
tmp68 = tl.full([1], 2, tl.int64)
tmp69 = tmp44 < tmp68
tmp70 = tmp67 & tmp69
tmp71 = tmp65 | tmp70
tmp72 = tl.where(tmp71, tmp43, tmp59)
tmp73 = tl.where(tmp71, tmp44, tmp68)
tmp76 = tmp74 * tmp75
tmp79 = tmp77 * tmp78
tmp80 = tmp76 + tmp79
tmp83 = tmp81 * tmp82
tmp84 = tmp80 + tmp83
tmp87 = tmp85 * tmp86
tmp88 = tmp84 + tmp87
tmp89 = tmp72 > tmp88
tmp90 = tmp72 == tmp88
tmp91 = tmp72 != tmp72
tmp92 = tmp88 != tmp88
tmp93 = tmp91 > tmp92
tmp94 = tmp89 | tmp93
tmp95 = tmp91 & tmp92
tmp96 = tmp90 | tmp95
tmp97 = tl.full([1], 3, tl.int64)
tmp98 = tmp73 < tmp97
tmp99 = tmp96 & tmp98
tmp100 = tmp94 | tmp99
tmp101 = tl.where(tmp100, tmp72, tmp88)
tmp102 = tl.where(tmp100, tmp73, tmp97)
tl.store(out_ptr0 + (x2), tmp102, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hh/chhmjvtnydhywuctlrzgz3hln27ndcn643agxfpeht6gnhljobu2.py
# Topologically Sorted Source Nodes: [x_max], Original ATen: [aten.index]
# Source node to ATen node mapping:
# x_max => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%expand, %expand_1, %expand_2, %getitem_1]), kwargs = {})
triton_poi_fused_index_2 = async_compile.triton('triton_poi_fused_index_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x4)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.int64)
# Topologically Sorted Source Nodes: [mul, dotprod, max_1], Original ATen: [aten.mul, aten.sum, aten.max]
triton_poi_fused_max_mul_sum_1.run(arg0_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_max], Original ATen: [aten.index]
triton_poi_fused_index_2.run(buf2, arg0_1, buf3, 64, grid=grid(64), stream=stream0)
del arg0_1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from itertools import product as product
class VNMaxPool(nn.Module):
def __init__(self, in_channels):
super(VNMaxPool, self).__init__()
self.map_to_dir = nn.Linear(in_channels, in_channels, bias=False)
def forward(self, x):
"""
x: point features of shape [B, N_feat, 3, N_samples, ...]
"""
d = self.map_to_dir(x.transpose(1, -1)).transpose(1, -1)
dotprod = (x * d).sum(2, keepdims=True)
idx = dotprod.max(dim=-1, keepdim=False)[1]
index_tuple = torch.meshgrid([torch.arange(j) for j in x.size()[:-1]]
) + (idx,)
x_max = x[index_tuple]
return x_max
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_max_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (20 + x0 + 64 * x1), xmask)
tmp22 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (24 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr1 + (28 + x0 + 64 * x1), xmask)
tmp45 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp46 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp48 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp49 = tl.load(in_ptr1 + (36 + x0 + 64 * x1), xmask)
tmp52 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp53 = tl.load(in_ptr1 + (40 + x0 + 64 * x1), xmask)
tmp56 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp57 = tl.load(in_ptr1 + (44 + x0 + 64 * x1), xmask)
tmp74 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp75 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp77 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp78 = tl.load(in_ptr1 + (52 + x0 + 64 * x1), xmask)
tmp81 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp82 = tl.load(in_ptr1 + (56 + x0 + 64 * x1), xmask)
tmp85 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp86 = tl.load(in_ptr1 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 > tmp29
tmp31 = tmp14 == tmp29
tmp32 = tmp14 != tmp14
tmp33 = tmp29 != tmp29
tmp34 = tmp32 > tmp33
tmp35 = tmp30 | tmp34
tmp36 = tmp32 & tmp33
tmp37 = tmp31 | tmp36
tmp38 = tl.full([1], 0, tl.int64)
tmp39 = tl.full([1], 1, tl.int64)
tmp40 = tmp38 < tmp39
tmp41 = tmp37 & tmp40
tmp42 = tmp35 | tmp41
tmp43 = tl.where(tmp42, tmp14, tmp29)
tmp44 = tl.where(tmp42, tmp38, tmp39)
tmp47 = tmp45 * tmp46
tmp50 = tmp48 * tmp49
tmp51 = tmp47 + tmp50
tmp54 = tmp52 * tmp53
tmp55 = tmp51 + tmp54
tmp58 = tmp56 * tmp57
tmp59 = tmp55 + tmp58
tmp60 = tmp43 > tmp59
tmp61 = tmp43 == tmp59
tmp62 = tmp43 != tmp43
tmp63 = tmp59 != tmp59
tmp64 = tmp62 > tmp63
tmp65 = tmp60 | tmp64
tmp66 = tmp62 & tmp63
tmp67 = tmp61 | tmp66
tmp68 = tl.full([1], 2, tl.int64)
tmp69 = tmp44 < tmp68
tmp70 = tmp67 & tmp69
tmp71 = tmp65 | tmp70
tmp72 = tl.where(tmp71, tmp43, tmp59)
tmp73 = tl.where(tmp71, tmp44, tmp68)
tmp76 = tmp74 * tmp75
tmp79 = tmp77 * tmp78
tmp80 = tmp76 + tmp79
tmp83 = tmp81 * tmp82
tmp84 = tmp80 + tmp83
tmp87 = tmp85 * tmp86
tmp88 = tmp84 + tmp87
tmp89 = tmp72 > tmp88
tmp90 = tmp72 == tmp88
tmp91 = tmp72 != tmp72
tmp92 = tmp88 != tmp88
tmp93 = tmp91 > tmp92
tmp94 = tmp89 | tmp93
tmp95 = tmp91 & tmp92
tmp96 = tmp90 | tmp95
tmp97 = tl.full([1], 3, tl.int64)
tmp98 = tmp73 < tmp97
tmp99 = tmp96 & tmp98
tmp100 = tmp94 | tmp99
tl.where(tmp100, tmp72, tmp88)
tmp102 = tl.where(tmp100, tmp73, tmp97)
tl.store(out_ptr0 + x2, tmp102, xmask)
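# Note on the kernel above: it is the fused, fully unrolled form of
# dotprod = (x * d).sum(dim=2) followed by dotprod.argmax(dim=-1). For each
# (batch, feature) pair it accumulates the 4-element channel dot product at
# each of the four sample positions and keeps a running argmax; the chains of
# >, ==, and != comparisons reproduce torch.max's semantics (NaN propagates,
# ties resolve to the lowest index) without branching.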
@triton.jit
def triton_poi_fused_index_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x4), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x4, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK
=16, YBLOCK=16, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.int64)
triton_poi_fused_max_mul_sum_1[grid(16)](arg0_1, buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_index_2[grid(64)](buf2, arg0_1, buf3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
del buf2
return buf3,
class VNMaxPoolNew(nn.Module):
def __init__(self, in_channels):
super(VNMaxPoolNew, self).__init__()
self.map_to_dir = nn.Linear(in_channels, in_channels, bias=False)
def forward(self, input_0):
arg1_1 = self.map_to_dir.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| david1309/SynergyNet_bonseyes | VNMaxPool | false | 3,385 | [
"MIT"
] | 0 | 9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | https://github.com/david1309/SynergyNet_bonseyes/tree/9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | import torch
import torch.nn as nn
from itertools import product as product
class Model(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.map_to_dir = nn.Linear(in_channels, in_channels, bias=False)
def forward(self, x):
"""
x: point features of shape [B, N_feat, 3, N_samples, ...]
"""
d = self.map_to_dir(x.transpose(1, -1)).transpose(1, -1)
dotprod = (x * d).sum(2, keepdims=True)
idx = dotprod.max(dim=-1, keepdim=False)[1]
index_tuple = torch.meshgrid([torch.arange(j) for j in x.size()[:-1]]
) + (idx,)
x_max = x[index_tuple]
return x_max
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
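A gather-based sketch equivalent to the meshgrid indexing in VNMaxPool.forward
(illustrative; assumes a 4-D input [B, F, C, N] as in get_inputs, with d
standing in for map_to_dir(x)): per (batch, feature) it selects the C-vector
at the sample position with the largest <x, d> dot product.

import torch

B, F, C, N = 4, 4, 4, 4
x = torch.rand(B, F, C, N)
d = torch.rand(B, F, C, N)
idx = (x * d).sum(dim=2, keepdim=True).argmax(dim=-1)           # [B, F, 1]
x_max = torch.gather(x, -1, idx.unsqueeze(-1).expand(B, F, C, 1)).squeeze(-1)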
Conv2dZeroInit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/b5/cb5h536lfxehnv2ezobudfl5wugu2y6mu444yw7yei4n22rp33zu.py
# Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# mul => mul
# mul_1 => mul_1
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {})
triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
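    # Fused epilogue (comment added): computes (conv_out + bias) * exp(logs * 3.0) elementwise,
    # storing the biased conv output in place and the scaled result to out_ptr0.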
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv2dZeroInit(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
        super().__init__(channels_in, channels_out, filter_size,
            stride=stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'filter_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2,
primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, buf1
class Conv2dZeroInitNew(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
        super().__init__(channels_in, channels_out, filter_size,
            stride=stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_4 = self.logs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| david-klindt/invertible-resnet | Conv2dZeroInit | false | 3,386 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
class Model(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
        super().__init__(channels_in, channels_out, filter_size,
            stride=stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
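# Hedged usage sketch (added for illustration; not part of the original repo).
# reset_parameters() zeroes the conv weight and bias at construction, so a
# fresh module outputs all zeros until it is trained.
if __name__ == "__main__":
    model = Model(4, 4, 4)  # channels_in, channels_out, filter_size
    x = torch.rand(4, 4, 4, 4)
    out = model(x)
    print(out.shape, float(out.abs().max()))  # expected: torch.Size([4, 4, 1, 1]) 0.0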
|
SqueezeExcite | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x_se], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x_se => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/o5/co5kpgkyaabh4nd7yz4gzpyl7x35mwdhgusbruykvtydzlq2lizg.py
# Topologically Sorted Source Nodes: [x_se_1, x_se_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_se_1 => convolution
# x_se_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hc/chcuw27vv75itefu6xswbdhopf2pq6oaj4bww4whelczhqtkghqr.py
# Topologically Sorted Source Nodes: [x_se_3, add, relu6, truediv, x], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# x => mul
# x_se_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_convolution_div_hardtanh_mul_2 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rh/crhdgkbcu2rbqzaxws3536qfrpjhmp6d6zhyebzqejxvceodywdi.py
# Topologically Sorted Source Nodes: [x_se_3, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
# Source node to ATen node mapping:
# add => add
# x_se_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_add_convolution_hardtanh_backward_3 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_se], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_se_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_se_1, x_se_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_se_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_se_3, add, relu6, truediv, x], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
triton_poi_fused_add_convolution_div_hardtanh_mul_2.run(primals_1, buf4, primals_5, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_se_3, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
triton_poi_fused_add_convolution_hardtanh_backward_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import product as product
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
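# Worked examples (comments added for clarity):
#   _make_divisible(1.0, 4)  -> 4   (clamped up to min_value)
#   _make_divisible(10, 4)   -> 12  (rounds to the nearest multiple, half up)
#   _make_divisible(20, 16)  -> 32  (16 < 0.9 * 20, so it is bumped one step)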
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
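# Worked values (comments added for clarity): hard_sigmoid maps -3 -> 0.0,
# 0 -> 0.5, and +3 -> 1.0, a cheap piecewise-linear stand-in for torch.sigmoid.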
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chs': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
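    # Comment added: global average pool -- mean over the 16 spatial positions
    # per (batch, channel) element.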
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
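    # Comment added: records where the hard-sigmoid pre-activation saturates
    # (<= 0 or >= 6), i.e. the mask autograd needs for the hardtanh backward pass.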
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf4, primals_5, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExciteNew(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExciteNew, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, input_0):
primals_2 = self.conv_reduce.weight
primals_3 = self.conv_reduce.bias
primals_4 = self.conv_expand.weight
primals_5 = self.conv_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| david1309/SynergyNet_bonseyes | SqueezeExcite | false | 3,387 | [
"MIT"
] | 0 | 9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | https://github.com/david1309/SynergyNet_bonseyes/tree/9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import product as product
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class Model(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super().__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
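# Hedged usage sketch (added for illustration; not part of the original repo).
# With in_chs=4 and se_ratio=0.25, reduced_chs = _make_divisible(1.0, 4) = 4.
if __name__ == "__main__":
    model = Model(4)
    x = torch.rand(4, 4, 4, 4)
    print(model(x).shape)  # expected: torch.Size([4, 4, 4, 4]), x rescaled per channel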
|
MeanVarFC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xl/cxltji5v4pd435j44wezjn5dhexfkx4wrcadp35cffwfzsbaisad.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 4, 8), (128, 32, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 8), (128, 32, 8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MeanVarFC(nn.Module):
def __init__(self, input_shape):
super(MeanVarFC, self).__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, x):
x = x + self.param
return x
def get_inputs():
return [torch.rand([4, 4, 4, 8])]
def get_init_inputs():
return [[], {'input_shape': [4, 4]}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 4, 8), (128, 32, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(512)](primals_2, primals_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class MeanVarFCNew(nn.Module):
def __init__(self, input_shape):
super(MeanVarFCNew, self).__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, input_0):
primals_1 = self.param
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| david-klindt/invertible-resnet | MeanVarFC | false | 3,388 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_shape):
super().__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, x):
x = x + self.param
return x
def get_inputs():
return [torch.rand([4, 4, 4, 8])]
def get_init_inputs():
    return [[4, 4]]
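# Hedged usage sketch (added for illustration; not part of the original repo).
# input_shape=[4, 4] becomes a learnable [1, 8] offset that broadcasts over x.
if __name__ == "__main__":
    model = Model([4, 4])
    x = torch.rand(4, 4, 4, 8)
    print(model(x).shape)  # expected: torch.Size([4, 4, 4, 8])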
|
injective_pad | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ct/cct5cn6mo6zny2z4rzeplk2fltbb4cibi5gdqoa7cpdxa6kif7lx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute, [0, 0, 0, 4], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32) % 4
x3 = (xindex // 128)
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 4, 32, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class injective_pad(nn.Module):
def __init__(self, pad_size):
super(injective_pad, self).__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def forward(self, x):
x = x.permute(0, 2, 1, 3)
x = self.pad(x)
return x.permute(0, 2, 1, 3)
def inverse(self, x):
return x[:, :x.size(1) - self.pad_size, :, :]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pad_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
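    # Comment added: zero-pads the permuted channel axis from 4 to 8; indices
    # with x1 >= 4 fall in the pad region and read 0.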
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2 &
xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(512)](arg0_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 4, 32, 1), 0),
class injective_padNew(nn.Module):
def __init__(self, pad_size):
super(injective_padNew, self).__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def inverse(self, x):
return x[:, :x.size(1) - self.pad_size, :, :]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| david-klindt/invertible-resnet | injective_pad | false | 3,389 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, pad_size):
super().__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def forward(self, x):
x = x.permute(0, 2, 1, 3)
x = self.pad(x)
return x.permute(0, 2, 1, 3)
def inverse(self, x):
return x[:, :x.size(1) - self.pad_size, :, :]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
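# Hedged usage sketch (added for illustration; not part of the original repo).
# Padding happens along the channel axis, and inverse() slices it back off.
if __name__ == "__main__":
    model = Model(4)
    x = torch.rand(4, 4, 4, 4)
    y = model(x)
    print(y.shape)                           # expected: torch.Size([4, 8, 4, 4])
    print(torch.equal(model.inverse(y), x))  # expected: True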
|
Bias | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/uz/cuzdojeecdnp6sj6qer5und3uv6oxkzgk2365i55gch5mqyktk3i.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Bias(nn.Module):
def __init__(self, size):
super().__init__()
self.bias = nn.Parameter(torch.Tensor(size))
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
return x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class BiasNew(nn.Module):
def __init__(self, size):
super().__init__()
self.bias = nn.Parameter(torch.Tensor(size))
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| daviddavini/cs-260-project | Bias | false | 3,390 | [
"MIT"
] | 0 | 9e1067f8ff85c8c573262589bbe52740ef11275d | https://github.com/daviddavini/cs-260-project/tree/9e1067f8ff85c8c573262589bbe52740ef11275d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, size):
super().__init__()
self.bias = nn.Parameter(torch.Tensor(size))
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
return x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
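# Hedged usage sketch (added for illustration; not part of the original repo).
# The bias starts at zero, so the module is initially the identity.
if __name__ == "__main__":
    model = Model(4)
    x = torch.rand(4, 4, 4, 4)
    print(torch.equal(model(x), x))  # expected: True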
|
SimpleLinearModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ph/cphyvqksaznjc5f5gstivhj5vszkuncctuzvaegazln3taw555sz.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, linear], Original ATen: [aten.add, aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1)
del arg1_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn
class SimpleLinearModule(torch.nn.Module):
def __init__(self):
super(SimpleLinearModule, self).__init__()
def forward(self, input, weight, bias=None):
return F.linear(input + input, weight, bias)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
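# --- Added equivalence sketch (not part of the original record) ---
# F.linear(x + x, w) with no bias is (2 * x) @ w.T, which is exactly what the
# compiled graph computes: a fused self-add kernel followed by a single mm.
if __name__ == "__main__":
    _x, _w = torch.rand(4, 4), torch.rand(4, 4)
    assert torch.allclose(SimpleLinearModule()(_x, _w), (2 * _x) @ _w.t())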
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(arg1_1, (4, 4), (1, 4),
0), out=buf1)
del arg1_1
del buf0
return buf1,
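# Added note: reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0) views the weight
# with swapped strides -- a zero-copy transpose -- so extern_kernels.mm
# computes (x + x) @ weight.T, matching F.linear with bias=None.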
class SimpleLinearModuleNew(torch.nn.Module):
def __init__(self):
super(SimpleLinearModuleNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| andreas-hommel/glow | SimpleLinearModule | false | 3,391 | [
"Apache-2.0"
] | 0 | 2bbbf8188a2a941e85677c83f2146bbd076a262e | https://github.com/andreas-hommel/glow/tree/2bbbf8188a2a941e85677c83f2146bbd076a262e | import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, weight, bias=None):
return F.linear(input + input, weight, bias)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
Dense | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pm/cpmfyvcp2ca3v7ooocuxw6ukhdtvrsjtkx2ksxus5cf3hcg6yg6b.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 152
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7t/c7tl4sumjtjry7tjtgnqmbovvksgqubsdowfuhrkz4ymnneywfuc.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (152, 4), (4, 1))
assert_size_stride(primals_2, (152, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (48, 152), (152, 1))
assert_size_stride(primals_5, (48, ), (1, ))
assert_size_stride(primals_6, (1, 48), (48, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 152), (152, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 152), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 152), (2432, 608, 152, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 152), (2432, 608, 152, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 9728, grid=grid(9728), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 152), (152, 1), 0), reinterpret_tensor(primals_4, (152, 48), (1, 152), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 48), (768, 192, 48, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 3072, grid=grid(3072), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 48), (48, 1), 0), reinterpret_tensor(primals_6, (48, 1), (1, 48), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 152), (152, 1), 0), reinterpret_tensor(buf3, (64, 48), (48, 1), 0), primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((152, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((152, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((48, 152), (152, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 48), (48, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Dense(nn.Module):
def __init__(self, in_features):
super(Dense, self).__init__()
self.fc1 = nn.Linear(in_features, 152)
self.fc2 = nn.Linear(152, 48)
self.fc3 = nn.Linear(48, 1)
nn.init.kaiming_uniform_(self.fc1.weight)
nn.init.kaiming_uniform_(self.fc2.weight)
nn.init.kaiming_uniform_(self.fc3.weight)
def maximization(self):
for param in self.fc1.parameters():
param.requires_grad_()
for param in self.fc2.parameters():
param.requires_grad_()
for param in self.fc3.parameters():
param.requires_grad_()
def expectation(self):
for param in self.fc1.parameters():
param.requires_grad_(False)
for param in self.fc2.parameters():
param.requires_grad_(False)
for param in self.fc3.parameters():
param.requires_grad_(False)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
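# --- Added shape sketch (not part of the original record) ---
# The MLP maps the last dimension 4 -> 152 -> 48 -> 1, and nn.Linear applies
# over the last dim, so a (4, 4, 4, 4) input comes back as (4, 4, 4, 1).
if __name__ == "__main__":
    assert Dense(4)(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 1)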
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 152
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
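# Added note: both kernels above fuse the linear bias-add with ReLU and also
# store a boolean mask (activation <= 0) that autograd's threshold_backward
# reuses, which is why call() additionally returns the torch.bool buffers
# buf6 and buf7.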
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (152, 4), (4, 1))
assert_size_stride(primals_2, (152,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (48, 152), (152, 1))
assert_size_stride(primals_5, (48,), (1,))
assert_size_stride(primals_6, (1, 48), (48, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 152), (152, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 152), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 152), (2432, 608, 152, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 152), (2432, 608, 152, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(9728)](buf1,
primals_2, buf7, 9728, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 48), (48, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 152), (152, 1), 0),
reinterpret_tensor(primals_4, (152, 48), (1, 152), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 48), (768, 192, 48, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(3072)](buf3,
primals_5, buf6, 3072, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 48),
(48, 1), 0), reinterpret_tensor(primals_6, (48, 1), (1, 48), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 152), (152, 1), 0
), reinterpret_tensor(buf3, (64, 48), (48, 1), 0
), primals_6, buf6, primals_4, buf7
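# Added note: the 4-D activations are handled as flattened (64, features)
# matrices via reinterpret_tensor, so each nn.Linear lowers to a single
# mm/addmm over the collapsed leading dimensions, and only the final result
# is reshaped back to (4, 4, 4, 1).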
class DenseNew(nn.Module):
def __init__(self, in_features):
super(DenseNew, self).__init__()
self.fc1 = nn.Linear(in_features, 152)
self.fc2 = nn.Linear(152, 48)
self.fc3 = nn.Linear(48, 1)
nn.init.kaiming_uniform_(self.fc1.weight)
nn.init.kaiming_uniform_(self.fc2.weight)
nn.init.kaiming_uniform_(self.fc3.weight)
def maximization(self):
for param in self.fc1.parameters():
param.requires_grad_()
for param in self.fc2.parameters():
param.requires_grad_()
for param in self.fc3.parameters():
param.requires_grad_()
def expectation(self):
for param in self.fc1.parameters():
param.requires_grad_(False)
for param in self.fc2.parameters():
param.requires_grad_(False)
for param in self.fc3.parameters():
param.requires_grad_(False)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| cdslabamotong/GCNPP | Dense | false | 3,392 | [
"MIT"
] | 0 | 8445ed3f960e986e12e5a4d65e99e4125e6153c1 | https://github.com/cdslabamotong/GCNPP/tree/8445ed3f960e986e12e5a4d65e99e4125e6153c1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_features):
super().__init__()
self.fc1 = nn.Linear(in_features, 152)
self.fc2 = nn.Linear(152, 48)
self.fc3 = nn.Linear(48, 1)
nn.init.kaiming_uniform_(self.fc1.weight)
nn.init.kaiming_uniform_(self.fc2.weight)
nn.init.kaiming_uniform_(self.fc3.weight)
def maximization(self):
for param in self.fc1.parameters():
param.requires_grad_()
for param in self.fc2.parameters():
param.requires_grad_()
for param in self.fc3.parameters():
param.requires_grad_()
def expectation(self):
for param in self.fc1.parameters():
param.requires_grad_(False)
for param in self.fc2.parameters():
param.requires_grad_(False)
for param in self.fc3.parameters():
param.requires_grad_(False)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ResidualResidualDenseBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ry/cryiobypkawkmdldevamc2xgthshbnrm6is6ppwpjekkiy25lx7z.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + (x3), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pd/cpd4meipbe2zv2hwyhb6cjpvxof6tzs5i6jeu6dqinxcb26whgcv.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp20 & xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + ((-8) + x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + (x3), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3c/c3cglho5wyr4b4bwpkwiar5a4l3ewyyzusu3gf2a2qlomqutylik.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-8) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr5 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp32 & xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + ((-12) + x1), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + (x3), tmp45, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/la/clagauvrhsnsmt2s2mrzhhfmw6omixx6owqe7iwdjdtls52tkc5v.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2, %where_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 20
x0 = xindex % 16
x2 = (xindex // 320)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-8) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp35 & xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + ((-12) + x1), tmp35 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tmp45 = tl.full([1], 20, tl.int64)
tmp46 = tmp0 < tmp45
tmp47 = tl.load(in_ptr7 + (x0 + (16*((-16) + x1)) + (64*x2)), tmp44 & xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + ((-16) + x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + (x3), tmp58, xmask)
''', device_str='cuda')
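# Added note: the fused cat kernels above materialize torch.cat([...], dim=1)
# while applying the convolution bias-add and LeakyReLU(negative_slope=0.2)
# inline on each branch, so the intermediate activations are never written
# out as separate tensors.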
# kernel path: runs/run_shard_7/inductor_cache/iu/ciubqy3qr45bvfhdurk7hjkhteasgy5mlezftic2idhmf7qyh6ar.py
# Topologically Sorted Source Nodes: [out5, mul, out], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_4
# out => add
# out5 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {})
# %add : [num_users=7] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_mul_4 = async_compile.triton('triton_poi_fused_add_convolution_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
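# Added note: this kernel applies residual scaling,
# out = 0.2 * (conv5_out + bias) + identity, i.e. the RRDB-style scaled skip
# connection appended after each dense block.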
# kernel path: runs/run_shard_7/inductor_cache/en/cenkdlub5s7metqwwcztlvkkh5pepylfek7tmpu7xhosuputncwi.py
# Topologically Sorted Source Nodes: [out5_2, mul_2, out_2, mul_3, out_3], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul_2 => mul_14
# mul_3 => mul_15
# out5_2 => convolution_14
# out_2 => add_2
# out_3 => add_3
# Graph fragment:
# %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_11, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_14, 0.2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_14, %add_1), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_mul_5 = async_compile.triton('triton_poi_fused_add_convolution_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x3), xmask)
tmp8 = tl.load(in_ptr2 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp3
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
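# Added note: this variant fuses two chained residual updates in one pass:
# the last dense block's out = 0.2 * (conv5_out + bias) + prev, followed by
# the outer block skip out = 0.2 * out + block_input.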
# kernel path: runs/run_shard_7/inductor_cache/vk/cvko27mwejvcbcnzih7umvcocb2mv3j7rep5irxoey4dld4a3kzq.py
# Topologically Sorted Source Nodes: [conv2d_13, out4_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# out4_2 => gt_11, mul_13, where_11
# Graph fragment:
# %convolution_13 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_10, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_11 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_13, 0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_13, 0.2), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %convolution_13, %mul_13), kwargs = {})
# %gt_12 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_11, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
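# Added note: this kernel recomputes the LeakyReLU output only to store the
# boolean (> 0) mask consumed by leaky_relu_backward; the forward activation
# itself was already produced inline by the fused cat kernels.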
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_21, (4, ), (1, ))
assert_size_stride(primals_22, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_23, (4, ), (1, ))
assert_size_stride(primals_24, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_25, (4, ), (1, ))
assert_size_stride(primals_26, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_27, (4, ), (1, ))
assert_size_stride(primals_28, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_29, (4, ), (1, ))
assert_size_stride(primals_30, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_31, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, primals_3, buf1, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf0, primals_3, buf2, primals_5, buf3, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, buf0, primals_3, buf2, primals_5, buf4, primals_7, buf5, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_1, buf0, primals_3, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 1280, grid=grid(1280), stream=stream0)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out5, mul, out], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_4.run(buf9, primals_11, primals_1, 256, grid=grid(256), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf9, buf10, primals_13, buf11, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_5], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf9, buf10, primals_13, buf12, primals_15, buf13, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_6], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf9, buf10, primals_13, buf12, primals_15, buf14, primals_17, buf15, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1))
buf17 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_7], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf9, buf10, primals_13, buf12, primals_15, buf14, primals_17, buf16, primals_19, buf17, 1280, grid=grid(1280), stream=stream0)
# Topologically Sorted Source Nodes: [out5_1], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [out5_1, mul_1, out_1], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_4.run(buf19, primals_21, buf9, 256, grid=grid(256), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 4, 4, 4), (64, 16, 4, 1))
buf21 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_8], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf19, buf20, primals_23, buf21, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 4, 4), (64, 16, 4, 1))
buf23 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_9], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf19, buf20, primals_23, buf22, primals_25, buf23, 768, grid=grid(768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 4, 4), (64, 16, 4, 1))
buf25 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_10], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf19, buf20, primals_23, buf22, primals_25, buf24, primals_27, buf25, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 4, 4, 4), (64, 16, 4, 1))
buf27 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_11], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf19, buf20, primals_23, buf22, primals_25, buf24, primals_27, buf26, primals_29, buf27, 1280, grid=grid(1280), stream=stream0)
# Topologically Sorted Source Nodes: [out5_2], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [out5_2, mul_2, out_2, mul_3, out_3], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_5.run(buf29, primals_31, buf19, primals_1, 256, grid=grid(256), stream=stream0)
del primals_31
buf30 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_13, out4_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf26, primals_29, buf30, 256, grid=grid(256), stream=stream0)
del buf26
del primals_29
buf31 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, out3_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf24, primals_27, buf31, 256, grid=grid(256), stream=stream0)
del buf24
del primals_27
buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_11, out2_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf22, primals_25, buf32, 256, grid=grid(256), stream=stream0)
del buf22
del primals_25
buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_10, out1_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf20, primals_23, buf33, 256, grid=grid(256), stream=stream0)
del buf20
del primals_23
buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_8, out4_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf16, primals_19, buf34, 256, grid=grid(256), stream=stream0)
del buf16
del primals_19
buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_7, out3_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf14, primals_17, buf35, 256, grid=grid(256), stream=stream0)
del buf14
del primals_17
buf36 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, out2_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf12, primals_15, buf36, 256, grid=grid(256), stream=stream0)
del buf12
del primals_15
buf37 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, out1_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf10, primals_13, buf37, 256, grid=grid(256), stream=stream0)
del buf10
del primals_13
buf38 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, out4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf6, primals_9, buf38, 256, grid=grid(256), stream=stream0)
del buf6
del primals_9
buf39 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, out3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf4, primals_7, buf39, 256, grid=grid(256), stream=stream0)
del buf4
del primals_7
buf40 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, out2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf2, primals_5, buf40, 256, grid=grid(256), stream=stream0)
del buf2
del primals_5
buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, out1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6.run(buf0, primals_3, buf41, 256, grid=grid(256), stream=stream0)
del buf0
del primals_3
return (buf29, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf30, buf31, buf32, buf33, buf34, buf35, buf36, buf37, buf38, buf39, buf40, buf41, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn as nn
class ResidualDenseBlock(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super(ResidualDenseBlock, self).__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, x: 'Tensor') ->Tensor:
identity = x
out1 = self.leaky_relu(self.conv1(x))
out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3,
out4], 1)))
out = out5 * 0.2 + identity
return out
class ResidualResidualDenseBlock(nn.Module):
"""Multi-layer residual dense convolution block.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels added by each convolutional layer.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super(ResidualResidualDenseBlock, self).__init__()
self.rdb1 = ResidualDenseBlock(channels, growths)
self.rdb2 = ResidualDenseBlock(channels, growths)
self.rdb3 = ResidualDenseBlock(channels, growths)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
identity = x
out = self.rdb1(x)
out = self.rdb2(out)
out = self.rdb3(out)
out = out * 0.2 + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'growths': 4}]
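# Usage sketch (added for illustration, not part of the original file): the
# block is shape-preserving, since every stage ends in a scaled residual add.
if __name__ == "__main__":
    block = ResidualResidualDenseBlock(channels=4, growths=4)
    x, = get_inputs()
    y = block(x)
    assert y.shape == x.shape  # torch.Size([4, 4, 4, 4])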
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp23 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp20 &
xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + (-8 + x1), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + x3, tmp32, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-8 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tl.full([1], 16, tl.int64)
tmp35 = tl.load(in_ptr5 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp32 &
xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + (-12 + x1), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 20
x0 = xindex % 16
x2 = xindex // 320
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 12, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-8 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp35 &
xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + (-12 + x1), tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tl.full([1], 20, tl.int64)
tmp47 = tl.load(in_ptr7 + (x0 + 16 * (-16 + x1) + 64 * x2), tmp44 &
xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + (-16 + x1), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + x3, tmp58, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x3, xmask)
tmp8 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp3
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, xmask)
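# Added commentary: this kernel recomputes conv-output + bias and the LeakyReLU
# sign test, but stores only the boolean mask (activation > 0). Inductor keeps
# these masks instead of the full activations so the generated backward pass
# can select the correct LeakyReLU gradient branch cheaply.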
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (4, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_27, (4,), (1,))
assert_size_stride(primals_28, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_29, (4,), (1,))
assert_size_stride(primals_30, (4, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_31, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, primals_3, buf1,
512, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_1[grid(768)](primals_1, buf0, primals_3, buf2,
primals_5, buf3, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_2[grid(1024)](primals_1, buf0, primals_3, buf2,
primals_5, buf4, primals_7, buf5, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_3[grid(1280)](primals_1, buf0, primals_3, buf2,
primals_5, buf4, primals_7, buf6, primals_9, buf7, 1280, XBLOCK
=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_add_convolution_mul_4[grid(256)](buf9, primals_11,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_0[grid(512)](buf9, buf10, primals_13, buf11,
512, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_1[grid(768)](buf9, buf10, primals_13, buf12,
primals_15, buf13, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(1024)](buf9, buf10, primals_13, buf12,
primals_15, buf14, primals_17, buf15, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 4, 4), (64, 16, 4, 1))
buf17 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(1280)](buf9, buf10, primals_13, buf12,
primals_15, buf14, primals_17, buf16, primals_19, buf17, 1280,
XBLOCK=256, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1))
buf19 = buf18
del buf18
triton_poi_fused_add_convolution_mul_4[grid(256)](buf19, primals_21,
buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 4, 4, 4), (64, 16, 4, 1))
buf21 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_0[grid(512)](buf19, buf20, primals_23, buf21,
512, XBLOCK=128, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 4, 4), (64, 16, 4, 1))
buf23 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_1[grid(768)](buf19, buf20, primals_23, buf22,
primals_25, buf23, 768, XBLOCK=128, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 4, 4), (64, 16, 4, 1))
buf25 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(1024)](buf19, buf20, primals_23, buf22,
primals_25, buf24, primals_27, buf25, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 4, 4, 4), (64, 16, 4, 1))
buf27 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(1280)](buf19, buf20, primals_23, buf22,
primals_25, buf24, primals_27, buf26, primals_29, buf27, 1280,
XBLOCK=256, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1))
buf29 = buf28
del buf28
triton_poi_fused_add_convolution_mul_5[grid(256)](buf29, primals_31,
buf19, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf30 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf26, primals_29, buf30, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf26
del primals_29
buf31 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf24, primals_27, buf31, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf24
del primals_27
buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf22, primals_25, buf32, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf22
del primals_25
buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf20, primals_23, buf33, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf20
del primals_23
buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf16, primals_19, buf34, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf16
del primals_19
buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf14, primals_17, buf35, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf14
del primals_17
buf36 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf12, primals_15, buf36, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf12
del primals_15
buf37 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf10, primals_13, buf37, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf10
del primals_13
buf38 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf6, primals_9, buf38, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_9
buf39 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf4, primals_7, buf39, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf4
del primals_7
buf40 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf2, primals_5, buf40, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del primals_5
buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_6[grid(256)
](buf0, primals_3, buf41, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_3
return (buf29, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15,
buf17, buf19, buf21, buf23, buf25, buf27, buf30, buf31, buf32,
buf33, buf34, buf35, buf36, buf37, buf38, buf39, buf40, buf41)
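# Added commentary: buf29 is the forward output; the remaining tensors returned
# here (the input, conv weights, concatenated activations buf1..buf27, and the
# LeakyReLU masks buf30..buf41) are stashed for the autograd backward that
# Inductor generates separately.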
class ResidualDenseBlock(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super(ResidualDenseBlock, self).__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, x: 'Tensor') ->Tensor:
identity = x
out1 = self.leaky_relu(self.conv1(x))
out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3,
out4], 1)))
out = out5 * 0.2 + identity
return out
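# Mapping note (inferred from the kernel names above): each torch.cat in
# forward() corresponds to one triton_poi_fused_cat_N kernel, which folds the
# bias add and LeakyReLU of the newest conv result into the concatenation copy,
# so out1..out4 are never materialized as standalone activations.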
class ResidualResidualDenseBlockNew(nn.Module):
"""Multi-layer residual dense convolution block.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels added by each convolutional layer.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super(ResidualResidualDenseBlockNew, self).__init__()
self.rdb1 = ResidualDenseBlock(channels, growths)
self.rdb2 = ResidualDenseBlock(channels, growths)
self.rdb3 = ResidualDenseBlock(channels, growths)
def forward(self, input_0):
primals_2 = self.rdb1.conv1.weight
primals_3 = self.rdb1.conv1.bias
primals_4 = self.rdb1.conv2.weight
primals_5 = self.rdb1.conv2.bias
primals_6 = self.rdb1.conv3.weight
primals_7 = self.rdb1.conv3.bias
primals_8 = self.rdb1.conv4.weight
primals_9 = self.rdb1.conv4.bias
primals_10 = self.rdb1.conv5.weight
primals_11 = self.rdb1.conv5.bias
primals_12 = self.rdb2.conv1.weight
primals_13 = self.rdb2.conv1.bias
primals_14 = self.rdb2.conv2.weight
primals_15 = self.rdb2.conv2.bias
primals_16 = self.rdb2.conv3.weight
primals_17 = self.rdb2.conv3.bias
primals_18 = self.rdb2.conv4.weight
primals_19 = self.rdb2.conv4.bias
primals_20 = self.rdb2.conv5.weight
primals_21 = self.rdb2.conv5.bias
primals_22 = self.rdb3.conv1.weight
primals_23 = self.rdb3.conv1.bias
primals_24 = self.rdb3.conv2.weight
primals_25 = self.rdb3.conv2.bias
primals_26 = self.rdb3.conv3.weight
primals_27 = self.rdb3.conv3.bias
primals_28 = self.rdb3.conv4.weight
primals_29 = self.rdb3.conv4.bias
primals_30 = self.rdb3.conv5.weight
primals_31 = self.rdb3.conv5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
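# Equivalence sketch (illustrative; requires a CUDA device, since call() pins
# cuda:0). The compiled module should match the eager block up to
# floating-point reassociation:
# eager = ResidualResidualDenseBlock(4, 4).cuda()
# fast = ResidualResidualDenseBlockNew(4, 4).cuda()
# fast.load_state_dict(eager.state_dict())
# x = torch.rand(4, 4, 4, 4, device="cuda")
# torch.testing.assert_close(fast(x), eager(x), rtol=1e-4, atol=1e-5)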
| cyun-404/PieESRGAN | ResidualResidualDenseBlock | false | 3,393 | [
"Apache-2.0"
] | 0 | 22ffe683bf2389b646429494d1bc88e61a9d72c5 | https://github.com/cyun-404/PieESRGAN/tree/22ffe683bf2389b646429494d1bc88e61a9d72c5 | import torch
from torch import Tensor
import torch.nn as nn
class ResidualDenseBlock(nn.Module):
"""Achieves densely connected convolutional layers.
`Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels that increase in each layer of convolution.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super().__init__()
self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1,
1), (1, 1))
self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1,
1), (1, 1))
self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1,
1), (1, 1))
self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1,
1), (1, 1))
self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1,
1), (1, 1))
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.identity = nn.Identity()
def forward(self, x: 'Tensor') ->Tensor:
identity = x
out1 = self.leaky_relu(self.conv1(x))
out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3,
out4], 1)))
out = out5 * 0.2 + identity
return out
class Model(nn.Module):
"""Multi-layer residual dense convolution block.
Args:
channels (int): The number of channels in the input image.
growths (int): The number of channels added by each convolutional layer.
"""
def __init__(self, channels: 'int', growths: 'int') ->None:
super().__init__()
self.rdb1 = ResidualDenseBlock(channels, growths)
self.rdb2 = ResidualDenseBlock(channels, growths)
self.rdb3 = ResidualDenseBlock(channels, growths)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
identity = x
out = self.rdb1(x)
out = self.rdb2(out)
out = self.rdb3(out)
out = out * 0.2 + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
my_Hingeloss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xo/cxo7navcvfdngvouei3426orwczlourihu7qpxajq6oy5kvzifnp.py
# Topologically Sorted Source Nodes: [sub, mul_1, max_1, mul, pos, sub_1, loss, setitem, loss_1], Original ATen: [aten.rsub, aten.mul, aten.max, aten.sum, aten.sub, aten.add, aten.lift_fresh, aten.index_put, aten.mean]
# Source node to ATen node mapping:
# loss => add
# loss_1 => mean
# max_1 => max_1
# mul => mul
# mul_1 => mul_1
# pos => sum_1
# setitem => full_default, index_put
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %sum_1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%add, [%lt], %full_default), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%index_put,), kwargs = {})
triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = (rindex // 4)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), None)
tmp3 = tl.load(in_ptr1 + (r0 + (16*r1)), None)
tmp5 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), None)
tmp7 = tl.load(in_ptr1 + (4 + r0 + (16*r1)), None)
tmp10 = tl.load(in_ptr0 + (8 + r0 + (16*r1)), None)
tmp12 = tl.load(in_ptr1 + (8 + r0 + (16*r1)), None)
tmp15 = tl.load(in_ptr0 + (12 + r0 + (16*r1)), None)
tmp17 = tl.load(in_ptr1 + (12 + r0 + (16*r1)), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp6 = tmp1 - tmp5
tmp8 = tmp6 * tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp1 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp1 - tmp15
tmp18 = tmp16 * tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp3 * tmp0
tmp21 = tmp7 * tmp5
tmp22 = tmp20 + tmp21
tmp23 = tmp12 * tmp10
tmp24 = tmp22 + tmp23
tmp25 = tmp17 * tmp15
tmp26 = tmp24 + tmp25
tmp27 = tmp19 - tmp26
tmp28 = tmp27 + tmp1
tmp29 = 0.0
tmp30 = tmp28 < tmp29
tmp31 = tl.where(tmp30, tmp29, tmp28)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp35 = 64.0
tmp36 = tmp34 / tmp35
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp36, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [sub, mul_1, max_1, mul, pos, sub_1, loss, setitem, loss_1], Original ATen: [aten.rsub, aten.mul, aten.max, aten.sum, aten.sub, aten.add, aten.lift_fresh, aten.index_put, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0.run(buf3, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class my_Hingeloss(nn.Module):
def __init__(self):
super(my_Hingeloss, self).__init__()
def forward(self, output, target):
# pos: score given to the true class at each position (target is one-hot on dim 2)
pos = torch.sum(output * target, 2)
# neg: best competing score; the true-class entry is zeroed by (1 - target)
neg = torch.max((1 - target) * output, 2)
loss = neg[0] - pos + 1
loss[loss < 0] = 0
loss = torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
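# Worked example (added for illustration, not part of the original file): with
# a one-hot target on dim 2 this is a margin-1 multiclass hinge,
#     loss = mean(max(0, max_j[(1 - t_j) * s_j] - s_y + 1)).
if __name__ == "__main__":
    crit = my_Hingeloss()
    scores = torch.tensor([2.0, 0.5, 0.0, 0.0]).view(1, 1, 4, 1)
    target = torch.tensor([1.0, 0.0, 0.0, 0.0]).view(1, 1, 4, 1)
    # neg = 0.5, pos = 2.0 -> max(0, 0.5 - 2.0 + 1) = 0
    print(crit(scores, target))  # tensor(0.)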
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0(
in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None)
tmp3 = tl.load(in_ptr1 + (r0 + 16 * r1), None)
tmp5 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None)
tmp7 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), None)
tmp10 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None)
tmp12 = tl.load(in_ptr1 + (8 + r0 + 16 * r1), None)
tmp15 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None)
tmp17 = tl.load(in_ptr1 + (12 + r0 + 16 * r1), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp6 = tmp1 - tmp5
tmp8 = tmp6 * tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp1 - tmp10
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp1 - tmp15
tmp18 = tmp16 * tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp3 * tmp0
tmp21 = tmp7 * tmp5
tmp22 = tmp20 + tmp21
tmp23 = tmp12 * tmp10
tmp24 = tmp22 + tmp23
tmp25 = tmp17 * tmp15
tmp26 = tmp24 + tmp25
tmp27 = tmp19 - tmp26
tmp28 = tmp27 + tmp1
tmp29 = 0.0
tmp30 = tmp28 < tmp29
tmp31 = tl.where(tmp30, tmp29, tmp28)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp35 = 64.0
tmp36 = tmp34 / tmp35
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)
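# Added commentary: the entire loss is fused into this single reduction. Each
# of the 64 r-indices is one (batch, row, col) position; the four class scores
# sit at stride 4 and are reduced with pairwise maxima (neg) and a running sum
# of target * output (pos), then clamped at zero and averaged over all 64.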
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_index_put_lift_fresh_max_mean_mul_rsub_sub_sum_0[
grid(1)](buf3, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf3,
class my_HingelossNew(nn.Module):
def __init__(self):
super(my_HingelossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| carsault/chord_extraction_prediction_lib | my_Hingeloss | false | 3,394 | [
"MIT"
] | 0 | 6de09eef9f2852b56b04874d2e42eb504c96d33f | https://github.com/carsault/chord_extraction_prediction_lib/tree/6de09eef9f2852b56b04874d2e42eb504c96d33f | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
pos = torch.sum(output * target, 2)
neg = torch.max((1 - target) * output, 2)
loss = neg[0] - pos + 1
loss[loss < 0] = 0
loss = torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MatchingNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hp/chpwcehtnvunea72lynrdl2354n7kd5im7izncef4zxh3hgjqcui.py
# Topologically Sorted Source Nodes: [features_test], Original ATen: [aten.div]
# Source node to ATen node mapping:
# features_test => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
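# Reference semantics (sketch, not produced by Inductor): the kernel above is
# an elementwise L2 normalization over the trailing dimension of size 4,
#   ref = x / x.norm(dim=-1, keepdim=True).clamp_min(1e-12)
# i.e. torch.nn.functional.normalize(x, dim=-1), matching the 1e-12 eps.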
# kernel path: runs/run_shard_7/inductor_cache/zv/czvpfo7gqj3jium2inr5i7qioiijaa6e5yn7uhjgcjic3vk5adsy.py
# Topologically Sorted Source Nodes: [cosine_scores], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# cosine_scores => amax, exp, sub, sum_3
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp5 = tmp1 * tmp4
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp1 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp1 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + (x0), tmp12, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
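# Reference sketch (illustrative, not part of the compiled graph): this is
# the reduction pass of a numerically stable softmax over rows of length 4.
# Per row it stores max(scale * sim) and sum(exp(scale * sim - max));
# shifting by the row max before exp() avoids overflow without changing the
# result, since exp(a - m) / sum_j exp(b_j - m) == exp(a) / sum_j exp(b_j).
def softmax_stats_reference(scale, sim):
    # sim: (rows, 4) similarities; returns the (rowmax, denominator) pair
    # written to out_ptr0 / out_ptr1 above
    z = scale * sim
    rowmax = z.max(dim=1, keepdim=True).values
    denom = (z - rowmax).exp().sum(dim=1, keepdim=True)
    return rowmax, denom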
# kernel path: runs/run_shard_7/inductor_cache/ch/cchgkm4bkdcviz6qney2bnujdvljloavm3lff6gk7ruiijq74zm2.py
# Topologically Sorted Source Nodes: [cosine_scores], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# cosine_scores => amax, div_2, exp, sub, sum_3
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
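# Reference sketch (illustrative): together with triton_poi_fused__softmax_1,
# this elementwise pass completes softmax(scale * sim, dim=1), re-applying
# the scale to each logit and dividing exp(z - rowmax) by the saved
# denominator. Eager-mode equivalent for the 2D view used here:
def scaled_softmax_reference(scale, sim):
    # scale: 1-element tensor (primals_4); sim: (rows, 4) similarities
    return torch.softmax(scale * sim, dim=1)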
# kernel path: runs/run_shard_7/inductor_cache/5w/c5wyv3yj7aerxpxxcpkv2wzu7op2zvwpuaqblti7h47srmwpy4pa.py
# Topologically Sorted Source Nodes: [clamp, scores_cls_1], Original ATen: [aten.clamp, aten.log]
# Source node to ATen node mapping:
# clamp => clamp_min_2
# scores_cls_1 => log
# Graph fragment:
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%bmm_1, 1e-07), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_2,), kwargs = {})
triton_poi_fused_clamp_log_3 = async_compile.triton('triton_poi_fused_clamp_log_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_log_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_log_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1e-07
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tl_math.log(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
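# Reference sketch (illustrative): log(clamp(x, min=1e-07)) guards against
# log(0), which can occur when the softmax-weighted average of one-hot
# labels is exactly zero for some class:
def safe_log_reference(x):
    # elementwise, matching tmp1/tmp2/tmp3 in the kernel
    return torch.log(torch.clamp(x, min=1e-07))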
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features_test], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_3, buf0, 64, grid=grid(64), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features_train], Original ATen: [aten.div]
triton_poi_fused_div_0.run(primals_1, buf1, 64, grid=grid(64), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features_test, bmm], Original ATen: [aten.div, aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf4 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
# Topologically Sorted Source Nodes: [cosine_scores], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(primals_4, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [cosine_scores], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(primals_4, buf2, buf3, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf3
del buf4
buf6 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [scores_cls], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [clamp, scores_cls_1], Original ATen: [aten.clamp, aten.log]
triton_poi_fused_clamp_log_3.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
return (buf7, primals_4, buf2, buf6, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.optim
import torch.nn.functional as F
import torch.nn.parallel
class MatchingNetwork(nn.Module):
def __init__(self, opt):
super(MatchingNetwork, self).__init__()
scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 10.0
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls),
requires_grad=True)
def forward(self, features_test, features_train, labels_train):
"""Recognize novel categories based on the Matching Nets approach.
        Classify the test examples (i.e., `features_test`) using the available
        training examples (i.e., `features_train` and `labels_train`) and the
        Matching Nets approach.
Args:
features_test: A 3D tensor with shape
[batch_size x num_test_examples x num_channels] that represents
the test features of each training episode in the batch.
features_train: A 3D tensor with shape
[batch_size x num_train_examples x num_channels] that represents
the train features of each training episode in the batch.
labels_train: A 3D tensor with shape
[batch_size x num_train_examples x nKnovel] that represents
the train labels (encoded as 1-hot vectors) of each training
episode in the batch.
Return:
scores_cls: A 3D tensor with shape
[batch_size x num_test_examples x nKnovel] that represents the
classification scores of the test feature vectors for the
nKnovel novel categories.
"""
assert features_train.dim() == 3
assert labels_train.dim() == 3
assert features_test.dim() == 3
assert features_train.size(0) == labels_train.size(0)
assert features_train.size(0) == features_test.size(0)
assert features_train.size(1) == labels_train.size(1)
assert features_train.size(2) == features_test.size(2)
batch_size, num_test_examples, _num_channels = features_test.size()
num_train_examples = features_train.size(1)
        labels_train.size(2)  # nKnovel; result unused in this extracted version
features_test = F.normalize(features_test, p=2, dim=features_test.
dim() - 1, eps=1e-12)
features_train = F.normalize(features_train, p=2, dim=
features_train.dim() - 1, eps=1e-12)
cosine_similarities = self.scale_cls * torch.bmm(features_test,
features_train.transpose(1, 2))
cosine_similarities = cosine_similarities.view(batch_size *
num_test_examples, num_train_examples)
        cosine_scores = F.softmax(cosine_similarities, dim=1)
cosine_scores = cosine_scores.view(batch_size, num_test_examples,
num_train_examples)
scores_cls = torch.bmm(cosine_scores, labels_train)
scores_cls = torch.log(torch.clamp(scores_cls, min=1e-07))
return scores_cls
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'opt': _mock_config(scale_cls=1.0)}]
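# Minimal usage sketch (illustrative; shapes follow get_inputs above):
# scores_cls[b, i, k] equals
# log(sum_j softmax_j(scale * cos(test_i, train_j)) * labels_train[b, j, k]).
if __name__ == "__main__":
    net = MatchingNetwork(_mock_config(scale_cls=1.0))
    f_test, f_train, y_train = get_inputs()
    print(net(f_test, f_train, y_train).shape)  # torch.Size([4, 4, 4])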
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 * tmp2
tmp5 = tmp1 * tmp4
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp1 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp1 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clamp_log_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1e-07
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tl_math.log(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(64)](primals_3, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_0[grid(64)](primals_1, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (4, 4, 4), (16, 1,
4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf4 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
triton_poi_fused__softmax_1[grid(16)](primals_4, buf2, buf3, buf4,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(64)](primals_4, buf2, buf3, buf4,
buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del buf4
buf6 = buf0
del buf0
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf6)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
triton_poi_fused_clamp_log_3[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf7, primals_4, buf2, buf6, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0)
class MatchingNetworkNew(nn.Module):
def __init__(self, opt):
super(MatchingNetworkNew, self).__init__()
scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 10.0
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls),
requires_grad=True)
def forward(self, input_0, input_1, input_2):
primals_4 = self.scale_cls
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Basasuya/FewShotWithoutForgetting | MatchingNetwork | false | 3,395 | [
"MIT"
] | 0 | eecc70e416ed82999124ddfca1b145f6dbcd74a6 | https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.optim
import torch.nn.functional as F
import torch.nn.parallel
class Model(nn.Module):
def __init__(self, opt):
super().__init__()
scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 10.0
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls),
requires_grad=True)
def forward(self, features_test, features_train, labels_train):
"""Recognize novel categories based on the Matching Nets approach.
        Classify the test examples (i.e., `features_test`) using the available
        training examples (i.e., `features_train` and `labels_train`) and the
        Matching Nets approach.
Args:
features_test: A 3D tensor with shape
[batch_size x num_test_examples x num_channels] that represents
the test features of each training episode in the batch.
features_train: A 3D tensor with shape
[batch_size x num_train_examples x num_channels] that represents
the train features of each training episode in the batch.
labels_train: A 3D tensor with shape
[batch_size x num_train_examples x nKnovel] that represents
the train labels (encoded as 1-hot vectors) of each training
episode in the batch.
Return:
scores_cls: A 3D tensor with shape
[batch_size x num_test_examples x nKnovel] that represents the
classification scores of the test feature vectors for the
nKnovel novel categories.
"""
assert features_train.dim() == 3
assert labels_train.dim() == 3
assert features_test.dim() == 3
assert features_train.size(0) == labels_train.size(0)
assert features_train.size(0) == features_test.size(0)
assert features_train.size(1) == labels_train.size(1)
assert features_train.size(2) == features_test.size(2)
batch_size, num_test_examples, _num_channels = features_test.size()
num_train_examples = features_train.size(1)
        labels_train.size(2)  # nKnovel; result unused in this extracted version
features_test = F.normalize(features_test, p=2, dim=features_test.
dim() - 1, eps=1e-12)
features_train = F.normalize(features_train, p=2, dim=
features_train.dim() - 1, eps=1e-12)
cosine_similarities = self.scale_cls * torch.bmm(features_test,
features_train.transpose(1, 2))
cosine_similarities = cosine_similarities.view(batch_size *
num_test_examples, num_train_examples)
        cosine_scores = F.softmax(cosine_similarities, dim=1)
cosine_scores = cosine_scores.view(batch_size, num_test_examples,
num_train_examples)
scores_cls = torch.bmm(cosine_scores, labels_train)
scores_cls = torch.log(torch.clamp(scores_cls, min=1e-07))
return scores_cls
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
    return [_mock_config(scale_cls=1.0)]
|
GroupNorm32 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zy/czyl6lkgkemly2rorp7dytnq5bsikpucv7yhz2var2hddv5xnkj4.py
# Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul_2
# sigmoid => sigmoid
# y => add, add_1, mul_1, rsqrt, var_mean
# y_1 => mul_3
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4.0), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sigmoid), kwargs = {})
triton_per_fused_mul_native_group_norm_sigmoid_0 = async_compile.triton('triton_per_fused_mul_native_group_norm_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_native_group_norm_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 4.0
tmp29 = tmp27 * tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = tmp27 * tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + (64*x0)), tmp31, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
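# Reference sketch (illustrative, not part of the compiled graph): the
# persistent-reduction kernel above fuses GroupNorm(num_groups=1, eps=1e-05)
# with the gate y * sigmoid(4.0 * y), i.e. the swish=4 case specialized into
# the graph. An assumed eager-mode equivalent for the inputs used here:
def groupnorm_swish_reference(x, weight, bias):
    import torch.nn.functional as F
    y = F.group_norm(x, num_groups=1, weight=weight, bias=bias, eps=1e-05)
    return y * torch.sigmoid(4.0 * y)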
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_per_fused_mul_native_group_norm_sigmoid_0.run(buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, grid=grid(4), stream=stream0)
return (buf5, primals_1, primals_2, primals_3, buf0, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class GroupNorm32(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, x):
y = super().forward(x.float())
if self.swish == 1.0:
y = F.silu(y)
elif self.swish:
            y = y * torch.sigmoid(y * float(self.swish))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_groups': 1, 'num_channels': 4, 'swish': 4}]
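# Minimal usage sketch (illustrative): swish == 1.0 takes the F.silu fast
# path, any other truthy value gates with sigmoid(swish * y), and swish == 0
# leaves the normalized output ungated.
if __name__ == "__main__":
    gn = GroupNorm32(num_groups=1, num_channels=4, swish=4)
    print(gn(torch.rand([4, 4, 4, 4])).shape)  # torch.Size([4, 4, 4, 4])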
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 4.0
tmp29 = tmp27 * tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = tmp27 * tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4
del buf4
get_raw_stream(0)
triton_per_fused_mul_native_group_norm_sigmoid_0[grid(4)](buf3,
buf5, primals_1, primals_2, primals_3, buf0, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
return buf5, primals_1, primals_2, primals_3, buf0, buf3
class GroupNorm32New(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| dbanys/glide-text2im | GroupNorm32 | false | 3,396 | [
"MIT"
] | 0 | 5177545ec62f1fddc3075a8a69b63df3eb2256a5 | https://github.com/dbanys/glide-text2im/tree/5177545ec62f1fddc3075a8a69b63df3eb2256a5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, x):
y = super().forward(x.float())
if self.swish == 1.0:
y = F.silu(y)
elif self.swish:
            y = y * torch.sigmoid(y * float(self.swish))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [1, 4, 4]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
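# Reference sketch (illustrative): this kernel fuses the fc1 bias add with
# ReLU and also materializes the boolean mask (activation <= 0) that
# autograd's threshold_backward uses to zero incoming gradients:
def relu_with_mask_reference(x, bias):
    y = torch.relu(x + bias)  # forward activation, written back in place
    mask = y <= 0             # saved-for-backward mask (buf7 in call below)
    return y, mask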
# kernel path: runs/run_shard_7/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
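# Reference sketch (illustrative): Inductor split the final nn.Linear into
# an unbiased extern mm (see call() below) plus this elementwise kernel,
# folding the bias add into the tanh so the output head costs one extra pass:
def biased_tanh_reference(x, bias):
    return torch.tanh(x + bias)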
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
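# Note: for nn.Linear, weight has shape (out_features, in_features), so
# weight.data.size()[0] is the layer's output width; a conventional fan-in
# bound would use size()[1] instead. The function is kept as written, since
# the compiled graph must match it; e.g. hidden_init(nn.Linear(4, 256))
# yields +/- 1/sqrt(256) rather than 1/sqrt(4).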
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256,
fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf6, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), buf5, primals_6, buf6, primals_4, buf7
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class ActorNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256,
fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(ActorNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| david-varela/continuous_control | Actor | false | 3,397 | [
"MIT"
] | 0 | 2bce9ea958fb21e88ac2f129ba8911e95dd7b1d2 | https://github.com/david-varela/continuous_control/tree/2bce9ea958fb21e88ac2f129ba8911e95dd7b1d2 | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=256,
fc2_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/au/cau6qypw2vz4drppp6yr6chutchyhnniousxhhlq2y5r3yu3gep5.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
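# Note on the kernel above: it fuses the linear layer's bias add with the
# ReLU in place (in_out_ptr0 is both read and written), broadcasting the
# 4-element bias across rows via x0 = xindex % 4.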
# kernel path: runs/run_shard_7/inductor_cache/vh/cvhowampoosezwy5zm5vfkdmhzrvsn2u2gxpn4cchngk4b74ympu.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
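# Note on the kernel above: it is a numerically stable softmax over the
# trailing dimension of size 2. For each pair (a, b) it takes m = max(a, b)
# and returns exp(x - m) / (exp(a - m) + exp(b - m)), which equals
# exp(x) / (exp(a) + exp(b)) algebraically but cannot overflow in exp.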
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 128, grid=grid(128), stream=stream0)
del buf4
return (buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, n_layersDecod, hidden_size, output_size=2):
super(Discriminator, self).__init__()
self.map1 = nn.Linear(n_layersDecod * hidden_size, hidden_size)
self.map2 = nn.Linear(hidden_size, hidden_size)
self.map3 = nn.Linear(hidden_size, output_size)
self.n_layersDecod = n_layersDecod
self.hidden_size = hidden_size
def forward(self, x):
x = x.view(-1, self.n_layersDecod * self.hidden_size)
x = F.relu(self.map1(x))
x = F.relu(self.map2(x))
        return nn.Softmax(dim=1)(self.map3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layersDecod': 1, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf4, buf5, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf4
return buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, buf3, buf5, primals_6, primals_4
class DiscriminatorNew(nn.Module):
def __init__(self, n_layersDecod, hidden_size, output_size=2):
super(DiscriminatorNew, self).__init__()
self.map1 = nn.Linear(n_layersDecod * hidden_size, hidden_size)
self.map2 = nn.Linear(hidden_size, hidden_size)
self.map3 = nn.Linear(hidden_size, output_size)
self.n_layersDecod = n_layersDecod
self.hidden_size = hidden_size
def forward(self, input_0):
primals_2 = self.map1.weight
primals_3 = self.map1.bias
primals_4 = self.map2.weight
primals_5 = self.map2.bias
primals_6 = self.map3.weight
primals_7 = self.map3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
 | carsault/chord_extraction_prediction_lib | Discriminator | false | 3,398 | ["MIT"] | 0 | 6de09eef9f2852b56b04874d2e42eb504c96d33f | https://github.com/carsault/chord_extraction_prediction_lib/tree/6de09eef9f2852b56b04874d2e42eb504c96d33f | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_layersDecod, hidden_size, output_size=2):
super().__init__()
self.map1 = nn.Linear(n_layersDecod * hidden_size, hidden_size)
self.map2 = nn.Linear(hidden_size, hidden_size)
self.map3 = nn.Linear(hidden_size, output_size)
self.n_layersDecod = n_layersDecod
self.hidden_size = hidden_size
def forward(self, x):
x = x.view(-1, self.n_layersDecod * self.hidden_size)
x = F.relu(self.map1(x))
x = F.relu(self.map2(x))
        return nn.Softmax(dim=1)(self.map3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [1, 4]
|
my_BinaryCross | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/y7/cy7k65vqwk75ksc2p5vrvej4lop43vqloz5wl4lx4i2bxs2eomqq.py
# Topologically Sorted Source Nodes: [log, mul_1, sum_1], Original ATen: [aten.log, aten.mul, aten.sum]
# Source node to ATen node mapping:
# log => log
# mul_1 => mul_2
# sum_1 => sum_1
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %log), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [2]), kwargs = {})
triton_per_fused_log_mul_sum_0 = async_compile.triton('triton_per_fused_log_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tl_math.log(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
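# Note on the kernel above: each program reduces one row of 16 elements,
# accumulating target * log(output) with tl.sum -- the dim=2 inner sum of
# the module's loss expression.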
# kernel path: runs/run_shard_7/inductor_cache/4z/c4zkodasrxyvixxqjutr7zbjgbe2wm37wy57zcyczgstn64cmn4q.py
# Topologically Sorted Source Nodes: [exp, truediv, modif_beat, setitem, setitem_1, mul_2, mean, loss], Original ATen: [aten.exp, aten.reciprocal, aten.mul, aten.lift_fresh, aten.index_put, aten.mean, aten.neg]
# Source node to ATen node mapping:
# exp => exp
# loss => neg
# mean => mean
# modif_beat => mul_1
# mul_2 => mul_3
# setitem => full_default, index_put
# setitem_1 => full_default_1, index_put_1
# truediv => mul, reciprocal
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%exp,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 10), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.05000000074505806), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%mul_1, [%lt], %full_default), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.05000000074505806), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_1 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%gt], %full_default_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_put_1, %sum_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1 = async_compile.triton('triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp14 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = 10.0
tmp7 = tmp5 * tmp6
tmp8 = 7.0
tmp9 = tmp7 < tmp8
tmp10 = 0.05000000074505806
tmp11 = tl.where(tmp9, tmp10, tmp7)
tmp12 = tmp11 > tmp8
tmp13 = tl.where(tmp12, tmp10, tmp11)
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 256.0
tmp20 = tmp18 / tmp19
tmp21 = -tmp20
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
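# Note on the kernel above: 1.0 / exp(beat) is computed as a reciprocal of
# the exponential, scaled by 10; the two tl.where selections mirror the
# module's masked assignments, and 0.05000000074505806 is float32(5 / 100).
# The final reduction takes the mean over all 256 elements and negates it.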
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log, mul_1, sum_1], Original ATen: [aten.log, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_log_mul_sum_0.run(arg2_1, arg1_1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg1_1
del arg2_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [exp, truediv, modif_beat, setitem, setitem_1, mul_2, mean, loss], Original ATen: [aten.exp, aten.reciprocal, aten.mul, aten.lift_fresh, aten.index_put, aten.mean, aten.neg]
triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1.run(buf4, arg0_1, buf2, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class my_BinaryCross(nn.Module):
def __init__(self, args):
super(my_BinaryCross, self).__init__()
self.args = args
def forward(self, output, target, beat):
modif_beat = 1.0 / torch.exp(beat) * 10
modif_beat[modif_beat < 7] = 5 / 100
modif_beat[modif_beat > 7] = 5 / 100
batch_size = len(output)
len_pred = len(output[0])
loss = -torch.mean(modif_beat * torch.sum(target.view(batch_size,
len_pred, -1) * torch.log(output.view(batch_size, len_pred, -1)
), dim=2))
return loss
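# Hedged reading of the weighting above: the first masked assignment already
# sets every entry below 7 to 0.05, so the second one only touches entries
# strictly greater than 7; only values exactly equal to 7 keep their original
# weight. The loss is then the negated mean of the weighted per-position sums
# of target * log(output).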
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config()}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_log_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tl_math.log(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1(
in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
tmp0 = tl.load(in_ptr0 + r0, None)
tmp14 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = 10.0
tmp7 = tmp5 * tmp6
tmp8 = 7.0
tmp9 = tmp7 < tmp8
tmp10 = 0.05000000074505806
tmp11 = tl.where(tmp9, tmp10, tmp7)
tmp12 = tmp11 > tmp8
tmp13 = tl.where(tmp12, tmp10, tmp11)
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 256.0
tmp20 = tmp18 / tmp19
tmp21 = -tmp20
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_log_mul_sum_0[grid(16)](arg2_1, arg1_1, buf2, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
del arg1_1
del arg2_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_exp_index_put_lift_fresh_mean_mul_neg_reciprocal_1[
grid(1)](buf4, arg0_1, buf2, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf2
return buf4,
class my_BinaryCrossNew(nn.Module):
def __init__(self, args):
super(my_BinaryCrossNew, self).__init__()
self.args = args
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
 | carsault/chord_extraction_prediction_lib | my_BinaryCross | false | 3,399 | ["MIT"] | 0 | 6de09eef9f2852b56b04874d2e42eb504c96d33f | https://github.com/carsault/chord_extraction_prediction_lib/tree/6de09eef9f2852b56b04874d2e42eb504c96d33f | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, output, target, beat):
modif_beat = 1.0 / torch.exp(beat) * 10
modif_beat[modif_beat < 7] = 5 / 100
modif_beat[modif_beat > 7] = 5 / 100
batch_size = len(output)
len_pred = len(output[0])
loss = -torch.mean(modif_beat * torch.sum(target.view(batch_size,
len_pred, -1) * torch.log(output.view(batch_size, len_pred, -1)
), dim=2))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
Perceptron | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ar/car4nt7cwc4ymvixnba2mhrc7jitzacxdsbheemcjm5oqnsqr7hi.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {})
triton_poi_fused_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp4 * tmp6
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
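# Note on the kernel above: it fuses the bias add with the sigmoid and, as the
# node mapping indicates, also precomputes sigmoid(x) * (1 - sigmoid(x)) --
# the sigmoid derivative -- into a second buffer for reuse by the backward
# pass.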
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_sigmoid_backward_0.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Perceptron(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.fc1 = nn.Linear(input_dim, 1)
def forward(self, x_in):
return torch.sigmoid(self.fc1(x_in)).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp4 * tmp6
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sigmoid_sigmoid_backward_0[grid(64)](buf1,
primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class PerceptronNew(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.fc1 = nn.Linear(input_dim, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
 | dbradf/nlp-pytorch | Perceptron | false | 3,400 | ["Apache-2.0"] | 0 | 957e3c5a1edf1f2ae9a8e281729395bed886bc87 | https://github.com/dbradf/nlp-pytorch/tree/957e3c5a1edf1f2ae9a8e281729395bed886bc87 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.fc1 = nn.Linear(input_dim, 1)
def forward(self, x_in):
return torch.sigmoid(self.fc1(x_in)).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BasicNN | # AOT ID: ['1_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wm/cwmlpspwkzmiv4efvkrjowkoawwpe42tnpzkiacrjumkzfjsy3qc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x_1 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, 255.0), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.00392156862745098
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
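# Note on the kernel above: the multiplier 0.00392156862745098 is 1 / 255, so
# the kernel realizes the module's x / 255.0 normalization as a single fused
# multiply.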
# kernel path: runs/run_shard_7/inductor_cache/sp/cspbvdtjzgrogiqrdvxgtnc5djfqn22gfjy4t6u5773zfamx7rey.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div_1, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (2, 784), (784, 1))
assert_size_stride(primals_3, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 28, 28), (784, 1, 28, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (1, 784), (0, 1), 0), reinterpret_tensor(primals_2, (784, 2), (1, 784), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf1, buf4, 1, 2, grid=grid(1), stream=stream0)
del buf1
return (buf4, reinterpret_tensor(buf0, (1, 784), (784, 1), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class BasicNN(nn.Module):
def __init__(self):
super(BasicNN, self).__init__()
self.net = nn.Linear(28 * 28, 2)
def forward(self, x):
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
x = x.float()
if isinstance(x, type(torch.randn(1))):
x = Variable(x)
x = x.view(1, 1, 28, 28)
x = x / 255.0
batch_size = x.size(0)
x = x.view(batch_size, -1)
output = self.net(x.float())
        return F.softmax(output, dim=1)
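# Note on the forward above: isinstance(x, type(torch.randn(1))) is an
# indirect test for torch.Tensor, and Variable(x) is a no-op wrapper on
# modern PyTorch, where Tensor and Variable are merged.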
def get_inputs():
return [torch.rand([1, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.00392156862745098
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (2, 784), (784, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 28, 28), (784, 1, 28, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(784)](primals_1, buf0, 784, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (1, 784),
(0, 1), 0), reinterpret_tensor(primals_2, (784, 2), (1, 784), 0
), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf1, buf4, 1, 2, XBLOCK=1,
num_warps=2, num_stages=1)
del buf1
return buf4, reinterpret_tensor(buf0, (1, 784), (784, 1), 0), buf4
class BasicNNNew(nn.Module):
def __init__(self):
super(BasicNNNew, self).__init__()
self.net = nn.Linear(28 * 28, 2)
def forward(self, input_0):
primals_2 = self.net.weight
primals_3 = self.net.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
 | dbczumar/clipper | BasicNN | false | 3,401 | ["Apache-2.0"] | 0 | 80c97d27a38d60caaebb2a1ae6a995dd7ff1c82d | https://github.com/dbczumar/clipper/tree/80c97d27a38d60caaebb2a1ae6a995dd7ff1c82d | import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Linear(28 * 28, 2)
def forward(self, x):
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
x = x.float()
if isinstance(x, type(torch.randn(1))):
x = Variable(x)
x = x.view(1, 1, 28, 28)
x = x / 255.0
batch_size = x.size(0)
x = x.view(batch_size, -1)
output = self.net(x.float())
        return F.softmax(output, dim=1)
def get_inputs():
return [torch.rand([1, 1, 28, 28])]
def get_init_inputs():
return []
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vb/cvbxdocetk5tj67qrxlvwxnmwf3z2efb37af24dzpjl5kw5bkmq5.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
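# Note on the kernel above: the stride-3 loads (offset 1 + 3*x) appear to pick
# the middle of the three packed in-projection slices (getitem_1 in the graph
# fragment), add its bias, and apply the scaling factor of 1.0.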
# kernel path: runs/run_shard_7/inductor_cache/q6/cq6z6xhbr2gyiw5id4lceemepctrgo25eo5gvvxlgbncy77qxm4p.py
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# mul => mul
# softmax => amax, clone, exp, sub, sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 1.0), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_mul_1 = async_compile.triton('triton_poi_fused__softmax_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (3*x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp15, xmask)
tl.store(out_ptr2 + (x4), tmp26, xmask)
''', device_str='cuda')
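# Note on the kernel above: it stores the scaled, bias-added operand
# (out_ptr0) and, over the four products with in_ptr2, the row maximum
# (out_ptr1) and the sum of exp(score - max) (out_ptr2); the next kernel
# reuses all three to finish a numerically stable softmax.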
# kernel path: runs/run_shard_7/inductor_cache/qh/cqhxytgzmsgejzq2rbt7fdr5j2twqslr3w2cz4yhih2lbgvx6yly.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, clone, div, exp, sub, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp7, xmask)
''', device_str='cuda')
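# This second pass completes the softmax started above: it recomputes each
# q*k logit, subtracts the precomputed rowwise maximum, exponentiates, and
# divides by the precomputed sum. The strided store (x0 + 4*x2 + 16*x1 +
# 64*x3) simultaneously permutes the weights into the layout the following
# bmm reads.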
# kernel path: runs/run_shard_7/inductor_cache/uq/cuqseg2vikrs3ttioriunrdhmp6cffp4xwpf2b6xdz7w3atftosj.py
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (3*x1) + (12*x0) + (48*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
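# This clone kernel materializes the value slice of the packed qkv
# projection: the offset 2 + 3*x1 selects the third element of every
# (q, k, v) triple, folds in the bias from in_ptr1, and writes the result
# contiguously so the bmm with the attention weights can consume it directly.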
# kernel path: runs/run_shard_7/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
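# clone_4 is a small tiled transpose: it swaps the head and context axes of
# the attention output (bhtc -> bthc) so the heads can be flattened back into
# the model width before the final output projection.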
# kernel path: runs/run_shard_7/inductor_cache/sz/csz5twnkhe2zz3eaafwmgidmezpgvs3favuuva73uud6rs7ouhj6.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_5), kwargs = {})
triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
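# add_5 is the fused bias epilogue of the output projection: it adds the
# per-feature bias (primals_5) to the mm result in place (note in_out_ptr0 in
# mutated_arg_names above).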
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, primals_2, buf2, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_1.run(buf0, primals_2, buf2, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf1, buf2, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf0, primals_2, buf6, 64, grid=grid(64), stream=stream0)
del buf0
del primals_2
buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_5.run(buf10, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
return (buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), )
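    # Besides buf10 (the projected attention output), the tensors returned
    # above are intermediates that autograd saves so the backward pass can
    # compute gradients for the weights and the input.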
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch as th
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
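# Note: `scale` is attn_ch ** -0.25 and is applied to both q and k, so their
# einsum product ends up scaled by 1 / sqrt(attn_ch), i.e. standard scaled
# dot-product attention computed in a form that keeps the intermediate
# magnitudes of q and k smaller.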
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp15, xmask)
tl.store(out_ptr2 + x4, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf0, primals_2, buf2, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
triton_poi_fused__softmax_mul_1[grid(64)](buf0, primals_2, buf2,
buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf1, buf2, buf3, buf4, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(64)](buf0, primals_2, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_5[grid(64)](buf10, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
    return (buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0),
        reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf5,
        reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4,
        reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0))
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttentionNew(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, input_0):
primals_1 = self.c_qkv.weight
primals_2 = self.c_qkv.bias
primals_4 = self.c_proj.weight
primals_5 = self.c_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| dbanys/glide-text2im | MultiheadAttention | false | 3,402 | [
"MIT"
] | 0 | 5177545ec62f1fddc3075a8a69b63df3eb2256a5 | https://github.com/dbanys/glide-text2im/tree/5177545ec62f1fddc3075a8a69b63df3eb2256a5 | import math
import torch
import torch.nn as nn
import torch as th
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class Model(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
QPCnet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ag/cagobkhg6zksrtmoxz3gmmvzs4og6vvdgk5z6zhmpxxskx7ctk32.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 2048) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
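# The convolution itself runs through an extern (cuDNN) kernel inside `call`
# below; this Triton kernel only fuses the bias add with the ReLU epilogue,
# rewriting the convolution output in place. The same pattern repeats for the
# later conv layers.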
# kernel path: runs/run_shard_7/inductor_cache/zj/czjguysyq5o6w4yzlg4fqdzhaye4u35ozg5bi3w2jacdpmxjq3do.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 2048) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gy/cgy43zvn2wypuxgrt2yxuetq6bxcv4m3y52uv7xvavndocptjona.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
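# Alongside the pooled maxima, this kernel emits one int8 code per output
# element recording which position in the 2x2 window won (0 = top-left,
# 1 = top-right, 2 = bottom-left, 3 = bottom-right); max-pool backward uses
# these indices to route gradients.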
# kernel path: runs/run_shard_7/inductor_cache/eb/ceb5ovxvsal3ijz4js32csi26cra3av2frikfq7e43ular3ryl5y.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 31744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 248) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/th/cthsssep3cthggxbf3w4a3qxu2ay3jmf2owklhclc5xzcxggn3ss.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 7680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 15
x2 = (xindex // 60)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1) + (248*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1) + (248*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1) + (248*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1) + (248*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/go/cgo6xyix23ktbrj72a52ar6kgydd5kmyuxxe7ifs4ipjnlxeiiil.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/to/ctovwyosdni4gstiprani7kclfcxzquauvfetpial7sblehhkvhj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gm/cgmziw6omyslbhxm2nq3gvrxs4vgccetk26rid5cy3chmfna43ew.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_6 => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_7 = async_compile.triton('triton_poi_fused_sigmoid_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 15
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
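# The classifier head ends with a fused bias + sigmoid: in_ptr0 holds the
# single fc3 bias value, broadcast to every row before the logistic is
# applied. xnumel = 15 matches the 15 rows that view(-1, 32 * 4 * 4)
# produces from the pooled (4, 32, 15, 4) feature map in the source model.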
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (8, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (128, 512), (512, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (32, 128), (128, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (1, 32), (32, 1))
assert_size_stride(primals_13, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 32), (16384, 2048, 32, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 65536, grid=grid(65536), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 32), (32768, 2048, 32, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 131072, grid=grid(131072), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 16, 32, 16), (8192, 512, 16, 1), torch.float32)
buf5 = empty_strided_cuda((4, 16, 32, 16), (8192, 512, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 31, 8), (7936, 248, 8, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf7, primals_7, 31744, grid=grid(31744), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 32, 15, 4), (1920, 60, 4, 1), torch.int8)
buf9 = empty_strided_cuda((4, 32, 15, 4), (1920, 60, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_4.run(buf7, buf8, buf9, 7680, grid=grid(7680), stream=stream0)
buf10 = empty_strided_cuda((15, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (15, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 128), (1, 512), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 1920, grid=grid(1920), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((15, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (128, 32), (1, 128), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_11, 480, grid=grid(480), stream=stream0)
del primals_11
buf14 = empty_strided_cuda((15, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_12, (32, 1), (1, 32), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_7.run(buf15, primals_13, 15, grid=grid(15), stream=stream0)
del primals_13
return (buf15, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (15, 512), (512, 1), 0), buf11, buf13, buf15, primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 2, 64, 64), (8192, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 2, 2), (64, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class QPCnet(nn.Module):
def __init__(self, num_classes=2):
super(QPCnet, self).__init__()
self.conv1 = nn.Conv2d(2, 8, 3, [1, 2], 1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(8, 16, 3, 1, 1)
self.conv3 = nn.Conv2d(16, 32, 2, [1, 2])
self.fc1 = nn.Linear(32 * 4 * 4, 128)
self.fc2 = nn.Linear(128, 32)
self.fc3 = nn.Linear(32, 1)
def forward(self, x):
x = nn.functional.relu(self.conv1(x))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = self.pool(nn.functional.relu(self.conv3(x)))
x = x.view(-1, 32 * 4 * 4)
x = nn.functional.relu(self.fc1(x))
x = nn.functional.relu(self.fc2(x))
x = torch.sigmoid(self.fc3(x))
return x
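# Shape note for this 4 x 2 x 64 x 64 input: conv1 -> (4, 8, 64, 32),
# conv2 + pool -> (4, 16, 32, 16), conv3 -> (4, 32, 31, 8), pool ->
# (4, 32, 15, 4). The pooled output holds 4 * 1920 = 7680 values in total,
# so view(-1, 32 * 4 * 4) yields 15 rows of 512 rather than one row per
# batch element; this is why the compiled kernels above use xnumel = 1920,
# 480 and 15 for the fully connected stages.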
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 2048 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 2048 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 31744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 248 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 7680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 15
x2 = xindex // 60
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1 + 248 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1 + 248 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1 + 248 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1 + 248 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 15
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (8, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (128, 512), (512, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (32, 128), (128, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (1, 32), (32, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 32), (16384, 2048, 32, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(65536)](buf1, primals_2,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 32), (32768, 2048, 32, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(131072)](buf3, primals_5,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 32, 16), (8192, 512, 16, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 16, 32, 16), (8192, 512, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(32768)](buf3, buf4,
buf5, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 31, 8), (7936, 248, 8, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(31744)](buf7, primals_7,
31744, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 32, 15, 4), (1920, 60, 4, 1), torch.int8)
buf9 = empty_strided_cuda((4, 32, 15, 4), (1920, 60, 4, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_4[grid(7680)](buf7, buf8,
buf9, 7680, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((15, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (15, 512), (512, 1), 0),
reinterpret_tensor(primals_8, (512, 128), (1, 512), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(1920)](buf11, primals_9, 1920, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((15, 32), (32, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (128, 32),
(1, 128), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(480)](buf13, primals_11, 480, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf14 = empty_strided_cuda((15, 1), (1, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_12, (32, 1), (1,
32), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_sigmoid_7[grid(15)](buf15, primals_13, 15, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_13
return (buf15, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (15, 512), (512, 1
), 0), buf11, buf13, buf15, primals_12, primals_10, primals_8)
class QPCnetNew(nn.Module):
def __init__(self, num_classes=2):
super(QPCnetNew, self).__init__()
self.conv1 = nn.Conv2d(2, 8, 3, [1, 2], 1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(8, 16, 3, 1, 1)
self.conv3 = nn.Conv2d(16, 32, 2, [1, 2])
self.fc1 = nn.Linear(32 * 4 * 4, 128)
self.fc2 = nn.Linear(128, 32)
self.fc3 = nn.Linear(32, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_12 = self.fc3.weight
primals_13 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
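
A hedged parity check (my sketch, assuming a CUDA device and that both QPCnet and QPCnetNew from the listings above are in scope; the generated call() path is CUDA-only): copy the eager weights into the compiled wrapper and compare outputs.

import torch
if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = QPCnet().cuda().eval()
    fused = QPCnetNew().cuda().eval()
    fused.load_state_dict(eager.state_dict())   # identical parameters
    x = torch.rand(4, 2, 64, 64, device='cuda')
    with torch.no_grad():
        print(torch.allclose(eager(x), fused(x), atol=1e-5))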
| davidwangtgs/CNN_PAC | QPCnet | false | 3,403 | [
"MIT"
] | 0 | d3824fc269ad5c86a962336e140b222856f26a2c | https://github.com/davidwangtgs/CNN_PAC/tree/d3824fc269ad5c86a962336e140b222856f26a2c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_classes=2):
super().__init__()
self.conv1 = nn.Conv2d(2, 8, 3, [1, 2], 1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(8, 16, 3, 1, 1)
self.conv3 = nn.Conv2d(16, 32, 2, [1, 2])
self.fc1 = nn.Linear(32 * 4 * 4, 128)
self.fc2 = nn.Linear(128, 32)
self.fc3 = nn.Linear(32, 1)
def forward(self, x):
x = nn.functional.relu(self.conv1(x))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = self.pool(nn.functional.relu(self.conv3(x)))
x = x.view(-1, 32 * 4 * 4)
x = nn.functional.relu(self.fc1(x))
x = nn.functional.relu(self.fc2(x))
x = torch.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return []
|
ConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zx/czxlhx4s6te5f22sga2xc2brf4uagtr2xkl46odyfqb25nksyekm.py
# Topologically Sorted Source Nodes: [conv2d, img_out], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# img_out => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, img_out], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf0
del primals_2
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Block(nn.Module):
def __init__(self):
"""Initialisation for a lower-level DeepLPF conv block
:returns: N/A
:rtype: N/A
"""
super(Block, self).__init__()
def conv3x3(self, in_channels, out_channels, stride=1):
"""Represents a convolution of shape 3x3
:param in_channels: number of input channels
:param out_channels: number of output channels
:param stride: the convolution stride
        :returns: a 3x3 nn.Conv2d module with the specified parameterisation
        :rtype: nn.Conv2d
"""
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=1, bias=True)
class ConvBlock(Block, nn.Module):
def __init__(self, num_in_channels, num_out_channels, stride=1):
"""Initialise function for the higher level convolution block
:param in_channels:
:param out_channels:
:param stride:
:param padding:
:returns:
:rtype:
"""
super(Block, self).__init__()
self.conv = self.conv3x3(num_in_channels, num_out_channels, stride=2)
self.lrelu = nn.LeakyReLU()
def forward(self, x):
""" Forward function for the higher level convolution block
        :param x: Tensor representing the input BxCxWxH, where B is the batch size, C is the number of channels, and W and H are the image width and height
:returns: Tensor representing the output of the block
:rtype: Tensor
"""
img_out = self.lrelu(self.conv(x))
return img_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in_channels': 4, 'num_out_channels': 4}]
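
A small equivalence sketch (my addition, not repository code): the block is a stride-2 3x3 convolution followed by LeakyReLU with the default 0.01 negative slope, so the functional form below should match it exactly.

import torch
import torch.nn.functional as F
block = ConvBlock(4, 4)             # class from the listing above
x = torch.rand(4, 4, 4, 4)
ref = block(x)                      # (4, 4, 2, 2)
eq = F.leaky_relu(F.conv2d(x, block.conv.weight, block.conv.bias,
                           stride=2, padding=1), negative_slope=0.01)
print(torch.allclose(ref, eq))      # True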
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf0, primals_2,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf2, primals_1, primals_3, buf1
class Block(nn.Module):
def __init__(self):
"""Initialisation for a lower-level DeepLPF conv block
:returns: N/A
:rtype: N/A
"""
super(Block, self).__init__()
def conv3x3(self, in_channels, out_channels, stride=1):
"""Represents a convolution of shape 3x3
:param in_channels: number of input channels
:param out_channels: number of output channels
:param stride: the convolution stride
        :returns: a 3x3 nn.Conv2d module with the specified parameterisation
        :rtype: nn.Conv2d
"""
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=1, bias=True)
class ConvBlockNew(Block, nn.Module):
def __init__(self, num_in_channels, num_out_channels, stride=1):
"""Initialise function for the higher level convolution block
:param in_channels:
:param out_channels:
:param stride:
:param padding:
:returns:
:rtype:
"""
super(Block, self).__init__()
self.conv = self.conv3x3(num_in_channels, num_out_channels, stride=2)
self.lrelu = nn.LeakyReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
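
Restating the fused kernel above in plain torch (my reading of the generated code, offered as a sketch, not an authoritative equivalent): it adds the per-channel bias to the convolution output, then stores both the positive mask (buf1, kept for the backward pass) and the LeakyReLU result (buf2) in a single pass.

import torch
def fused_bias_leaky_relu(conv_out, bias):
    y = conv_out + bias.view(1, -1, 1, 1)   # tmp2 = tmp0 + tmp1
    mask = y > 0.0                          # tmp4, stored as torch.bool
    out = torch.where(mask, y, 0.01 * y)    # tmp7
    return out, mask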
| deshwalmahesh/CURL---cpu-gpu | ConvBlock | false | 3,404 | [
"BSD-3-Clause"
] | 0 | f4e87275b6cce556b9e04a188cf7ae13d810d82a | https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a | import torch
import torch.nn as nn
class Block(nn.Module):
def __init__(self):
"""Initialisation for a lower-level DeepLPF conv block
:returns: N/A
:rtype: N/A
"""
super().__init__()
def conv3x3(self, in_channels, out_channels, stride=1):
"""Represents a convolution of shape 3x3
:param in_channels: number of input channels
:param out_channels: number of output channels
:param stride: the convolution stride
        :returns: a 3x3 nn.Conv2d module with the specified parameterisation
        :rtype: nn.Conv2d
"""
return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=1, bias=True)
class Model(Block, nn.Module):
def __init__(self, num_in_channels, num_out_channels, stride=1):
"""Initialise function for the higher level convolution block
:param in_channels:
:param out_channels:
:param stride:
:param padding:
:returns:
:rtype:
"""
super().__init__()
self.conv = self.conv3x3(num_in_channels, num_out_channels, stride=2)
self.lrelu = nn.LeakyReLU()
def forward(self, x):
""" Forward function for the higher level convolution block
        :param x: Tensor representing the input BxCxWxH, where B is the batch size, C is the number of channels, and W and H are the image width and height
:returns: Tensor representing the output of the block
:rtype: Tensor
"""
img_out = self.lrelu(self.conv(x))
return img_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Joiner | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gq/cgq5uot5m3a7ty3vmitqwbgbtae266bhgd44su2hozhvztcnxukt.py
# Topologically Sorted Source Nodes: [logit, logit_1], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# logit => add
# logit_1 => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = (xindex // 16)
x3 = (xindex // 64)
x5 = xindex % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x6), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logit, logit_1], Original ATen: [aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Joiner(nn.Module):
def __init__(self, input_dim: 'int', output_dim: 'int'):
super().__init__()
self.output_linear = nn.Linear(input_dim, output_dim)
def forward(self, encoder_out: 'torch.Tensor', decoder_out: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
encoder_out:
Output from the encoder. Its shape is (N, T, C).
decoder_out:
Output from the decoder. Its shape is (N, U, C).
Returns:
Return a tensor of shape (N, T, U, C).
"""
assert encoder_out.ndim == decoder_out.ndim == 3
assert encoder_out.size(0) == decoder_out.size(0)
assert encoder_out.size(2) == decoder_out.size(2)
encoder_out = encoder_out.unsqueeze(2)
decoder_out = decoder_out.unsqueeze(1)
logit = encoder_out + decoder_out
logit = F.relu(logit)
output = self.output_linear(logit)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
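
A shape sketch (illustration only): the two unsqueeze calls turn (N, T, C) and (N, U, C) into (N, T, 1, C) and (N, 1, U, C), so the addition broadcasts every encoder frame against every decoder step before the ReLU and the output projection.

import torch
N, T, U, C = 4, 4, 4, 4
enc = torch.rand(N, T, C).unsqueeze(2)   # (N, T, 1, C)
dec = torch.rand(N, U, C).unsqueeze(1)   # (N, 1, U, C)
logit = torch.relu(enc + dec)
print(logit.shape)                       # torch.Size([4, 4, 4, 4])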
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex // 16
x3 = xindex // 64
x5 = xindex % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x6, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class JoinerNew(nn.Module):
def __init__(self, input_dim: 'int', output_dim: 'int'):
super().__init__()
self.output_linear = nn.Linear(input_dim, output_dim)
def forward(self, input_0, input_1):
primals_3 = self.output_linear.weight
primals_4 = self.output_linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
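
As with the other compiled wrappers, a hedged parity check (assumes CUDA and that the Joiner and JoinerNew classes above are in scope):

import torch
if torch.cuda.is_available():
    eager = Joiner(4, 4).cuda()
    fused = JoinerNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    enc = torch.rand(4, 4, 4, device='cuda')
    dec = torch.rand(4, 4, 4, device='cuda')
    with torch.no_grad():
        print(torch.allclose(eager(enc, dec), fused(enc, dec), atol=1e-5))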
| desh2608/icefall | Joiner | false | 3,405 | [
"Apache-2.0"
] | 0 | 1603744469d167d848e074f2ea98c587153205fa | https://github.com/desh2608/icefall/tree/1603744469d167d848e074f2ea98c587153205fa | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_dim: 'int', output_dim: 'int'):
super().__init__()
self.output_linear = nn.Linear(input_dim, output_dim)
def forward(self, encoder_out: 'torch.Tensor', decoder_out: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
encoder_out:
Output from the encoder. Its shape is (N, T, C).
decoder_out:
Output from the decoder. Its shape is (N, U, C).
Returns:
Return a tensor of shape (N, T, U, C).
"""
assert encoder_out.ndim == decoder_out.ndim == 3
assert encoder_out.size(0) == decoder_out.size(0)
assert encoder_out.size(2) == decoder_out.size(2)
encoder_out = encoder_out.unsqueeze(2)
decoder_out = decoder_out.unsqueeze(1)
logit = encoder_out + decoder_out
logit = F.relu(logit)
output = self.output_linear(logit)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
WeldonPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lb/clb7usy7qfmld2idfcmg2r22i3nfxc6io5mv3kdkm6sxqnukrctc.py
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# sort => sort
# Graph fragment:
# %sort : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%view, 2, True), kwargs = {})
triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True)
tmp7 = tmp6.to(tl.int64)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp5, xmask)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y7/cy7svbrvgizoup47rqhzlbrrwnvehommsnpvckyc5k6loax47jcb.py
# Topologically Sorted Source Nodes: [sum_1, div, sum_2, yMin, add], Original ATen: [aten.sum, aten.div, aten.add]
# Source node to ATen node mapping:
# add => add
# div => div
# sum_1 => sum_1
# sum_2 => sum_2
# yMin => div_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_6, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_12, [2], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
triton_poi_fused_add_div_sum_1 = async_compile.triton('triton_poi_fused_add_div_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.int64)
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_sort_0.run(arg0_1, buf0, buf3, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, div, sum_2, yMin, add], Original ATen: [aten.sum, aten.div, aten.add]
triton_poi_fused_add_div_sum_1.run(buf0, buf2, 16, grid=grid(16), stream=stream0)
del buf0
return (reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 15), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WeldonPooling(nn.Module):
def __init__(self, nMax=1, nMin=None):
super(WeldonPooling, self).__init__()
self.nMax = nMax
if nMin is None:
self.nMin = nMax
else:
self.nMin = nMin
self.input = torch.Tensor()
self.output = torch.Tensor()
self.indicesMax = torch.Tensor()
self.indicesMin = torch.Tensor()
def forward(self, input):
self.batchSize = 0
self.numChannels = 0
self.h = 0
self.w = 0
if input.dim() == 4:
self.batchSize = input.size(0)
self.numChannels = input.size(1)
self.h = input.size(2)
self.w = input.size(3)
elif input.dim() == 3:
self.batchSize = 1
self.numChannels = input.size(0)
self.h = input.size(1)
self.w = input.size(2)
        else:
            pass  # unsupported input rank; the shape fields above stay at 0
self.input = input
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
x = input.view(self.batchSize, self.numChannels, self.h * self.w)
scoreSorted, indices = torch.sort(x, x.dim() - 1, True)
self.indicesMax = indices[:, :, 0:nMax]
self.output = torch.sum(scoreSorted[:, :, 0:nMax], dim=2, keepdim=True)
self.output = self.output.div(nMax)
if nMin > 0:
self.indicesMin = indices[:, :, self.h * self.w - nMin:self.h *
self.w]
yMin = torch.sum(scoreSorted[:, :, self.h * self.w - nMin:self.
h * self.w], 2, keepdim=True).div(nMin)
self.output = torch.add(self.output, yMin)
if input.dim() == 4:
self.output = self.output.view(self.batchSize, self.numChannels,
1, 1)
elif input.dim() == 3:
self.output = self.output.view(self.numChannels, 1, 1)
return self.output
def backward(self, grad_output, _indices_grad=None):
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
yMax = grad_output.clone().view(self.batchSize, self.numChannels, 1
).expand(self.batchSize, self.numChannels, nMax)
z = torch.zeros(self.batchSize, self.numChannels, self.h * self.w
).type_as(self.input)
z = z.scatter_(2, self.indicesMax, yMax).div(nMax)
if nMin > 0:
yMin = grad_output.clone().view(self.batchSize, self.numChannels, 1
).div(nMin).expand(self.batchSize, self.numChannels, nMin)
self.gradInput = z.scatter_(2, self.indicesMin, yMin).view(self
.batchSize, self.numChannels, self.h, self.w)
else:
self.gradInput = z.view(self.batchSize, self.numChannels, self.
h, self.w)
if self.input.dim() == 3:
self.gradInput = self.gradInput.view(self.numChannels, self.h,
self.w)
return self.gradInput
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
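
An equivalence sketch (my addition, not repository code): with the default nMax = nMin = 1, the pooling reduces to the maximum plus the minimum over each channel's spatial positions.

import torch
pool = WeldonPooling()                    # class from the listing above
x = torch.rand(4, 4, 4, 4)
ref = pool(x)                             # (4, 4, 1, 1)
flat = x.view(4, 4, -1)
eq = (flat.max(dim=2).values + flat.min(dim=2).values).view(4, 4, 1, 1)
print(torch.allclose(ref, eq))            # True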
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable
=False, descending=True)
tmp7 = tmp6.to(tl.int64)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp5, xmask)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp7, xmask)
@triton.jit
def triton_poi_fused_add_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.int64)
get_raw_stream(0)
triton_per_fused_sort_0[grid(16)](arg0_1, buf0, buf3, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_add_div_sum_1[grid(16)](buf0, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0
), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 15
), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 0)
class WeldonPoolingNew(nn.Module):
def __init__(self, nMax=1, nMin=None):
super(WeldonPoolingNew, self).__init__()
self.nMax = nMax
if nMin is None:
self.nMin = nMax
else:
self.nMin = nMin
self.input = torch.Tensor()
self.output = torch.Tensor()
self.indicesMax = torch.Tensor()
self.indicesMin = torch.Tensor()
def backward(self, grad_output, _indices_grad=None):
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
yMax = grad_output.clone().view(self.batchSize, self.numChannels, 1
).expand(self.batchSize, self.numChannels, nMax)
z = torch.zeros(self.batchSize, self.numChannels, self.h * self.w
).type_as(self.input)
z = z.scatter_(2, self.indicesMax, yMax).div(nMax)
if nMin > 0:
yMin = grad_output.clone().view(self.batchSize, self.numChannels, 1
).div(nMin).expand(self.batchSize, self.numChannels, nMin)
self.gradInput = z.scatter_(2, self.indicesMin, yMin).view(self
.batchSize, self.numChannels, self.h, self.w)
else:
self.gradInput = z.view(self.batchSize, self.numChannels, self.
h, self.w)
if self.input.dim() == 3:
self.gradInput = self.gradInput.view(self.numChannels, self.h,
self.w)
return self.gradInput
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
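
What the two kernels above compute, restated in plain torch (my reading of the generated code, not an authoritative equivalent): the first sorts each channel's 16 flattened values in descending order while keeping indices for the backward pass; the second adds element 0 (the max) and element 15 (the min) of every sorted row.

import torch
x = torch.rand(4, 4, 4, 4)
vals, idx = x.view(4, 4, 16).sort(dim=2, descending=True)
out = (vals[:, :, 0] + vals[:, :, 15]).view(4, 4, 1, 1)
print(out.shape)                          # torch.Size([4, 4, 1, 1])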
| debayan/dsve-loc | WeldonPooling | false | 3,406 | [
"BSD-3-Clause-Clear"
] | 0 | 21b1e1837668b6daa0881514d0756e9bec039fcb | https://github.com/debayan/dsve-loc/tree/21b1e1837668b6daa0881514d0756e9bec039fcb | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, nMax=1, nMin=None):
super().__init__()
self.nMax = nMax
if nMin is None:
self.nMin = nMax
else:
self.nMin = nMin
self.input = torch.Tensor()
self.output = torch.Tensor()
self.indicesMax = torch.Tensor()
self.indicesMin = torch.Tensor()
def forward(self, input):
self.batchSize = 0
self.numChannels = 0
self.h = 0
self.w = 0
if input.dim() == 4:
self.batchSize = input.size(0)
self.numChannels = input.size(1)
self.h = input.size(2)
self.w = input.size(3)
elif input.dim() == 3:
self.batchSize = 1
self.numChannels = input.size(0)
self.h = input.size(1)
self.w = input.size(2)
        else:
            pass  # unsupported input rank; the shape fields above stay at 0
self.input = input
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
x = input.view(self.batchSize, self.numChannels, self.h * self.w)
scoreSorted, indices = torch.sort(x, x.dim() - 1, True)
self.indicesMax = indices[:, :, 0:nMax]
self.output = torch.sum(scoreSorted[:, :, 0:nMax], dim=2, keepdim=True)
self.output = self.output.div(nMax)
if nMin > 0:
self.indicesMin = indices[:, :, self.h * self.w - nMin:self.h *
self.w]
yMin = torch.sum(scoreSorted[:, :, self.h * self.w - nMin:self.
h * self.w], 2, keepdim=True).div(nMin)
self.output = torch.add(self.output, yMin)
if input.dim() == 4:
self.output = self.output.view(self.batchSize, self.numChannels,
1, 1)
elif input.dim() == 3:
self.output = self.output.view(self.numChannels, 1, 1)
return self.output
def backward(self, grad_output, _indices_grad=None):
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
yMax = grad_output.clone().view(self.batchSize, self.numChannels, 1
).expand(self.batchSize, self.numChannels, nMax)
z = torch.zeros(self.batchSize, self.numChannels, self.h * self.w
).type_as(self.input)
z = z.scatter_(2, self.indicesMax, yMax).div(nMax)
if nMin > 0:
yMin = grad_output.clone().view(self.batchSize, self.numChannels, 1
).div(nMin).expand(self.batchSize, self.numChannels, nMin)
self.gradInput = z.scatter_(2, self.indicesMin, yMin).view(self
.batchSize, self.numChannels, self.h, self.w)
else:
self.gradInput = z.view(self.batchSize, self.numChannels, self.
h, self.w)
if self.input.dim() == 3:
self.gradInput = self.gradInput.view(self.numChannels, self.h,
self.w)
return self.gradInput
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HardNegativeContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6b/c6bixyrttikkg53cqwagks4mvhumeai6tdkva5oiiuq4gbnhahnc.py
# Topologically Sorted Source Nodes: [diag_2, mul, scores_1, sort_1], Original ATen: [aten.diag_embed, aten.mul, aten.sub, aten.sort]
# Source node to ATen node mapping:
# diag_2 => eq, full_default, iota, where
# mul => mul
# scores_1 => sub
# sort_1 => sort_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute_1, %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 2), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %mul), kwargs = {})
# %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, 1, True), kwargs = {})
triton_per_fused_diag_embed_mul_sort_sub_0 = async_compile.triton('triton_per_fused_diag_embed_mul_sort_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_diag_embed_mul_sort_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_diag_embed_mul_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp4 = tl.load(in_ptr0 + (5*r1), None, eviction_policy='evict_last')
tmp1 = r1
tmp2 = x0
tmp3 = tmp1 == tmp2
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp1.to(tl.int16)
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, tmp14, = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp9, xmask)
tl.store(out_ptr1 + (r1 + (4*x0)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bs/cbsxmmi256ux5vrzlxdpdqr5wob5stbqcvv56dyd2e4mzqbamq5p.py
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# sort => sort
# Graph fragment:
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, 0, True), kwargs = {})
triton_per_fused_sort_1 = async_compile.triton('triton_per_fused_sort_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r1)), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (x0 + (4*r1)), tmp5, xmask)
''', device_str='cuda')
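# Eager-mode sketch of the kernel above (illustrative): the same masked scores
# are sorted again, this time along dim 0 (note the transposed x0 + 4*r1
# indexing), producing the per-column ranking.
def _sketch_column_sort(masked):
    col_sorted, _ = torch.sort(masked, dim=0, descending=True)
    return col_sorted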
# kernel path: runs/run_shard_7/inductor_cache/wk/cwkjmujyv2r6zzrwcajoo66qsgvmg3m4hwbrrzlhogm6ow5ra2cv.py
# Topologically Sorted Source Nodes: [add, clamp, neg_cap, add_1, clamp_1, neg_img, loss], Original ATen: [aten.add, aten.clamp, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# clamp => clamp_min
# clamp_1 => clamp_min_1
# loss => add_2
# neg_cap => sum_1
# neg_img => sum_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_1, %expand), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %expand_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_clamp_sum_2 = async_compile.triton('triton_per_fused_add_clamp_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (5*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp2 = 0.2
tmp3 = tmp2 - tmp1
tmp4 = tmp0 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 + tmp3
tmp12 = triton_helpers.maximum(tmp11, tmp5)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = tmp9 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
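# Eager-mode sketch of the fused reduction above (illustrative; margin=0.2 is
# baked in as tmp2, and nmax=1 gives a single hardest negative per direction):
def _sketch_hinge_reduction(max_c, max_i, diag, margin=0.2):
    neg_cap = torch.clamp(max_c + (margin - diag), min=0).sum()
    neg_img = torch.clamp(max_i + (margin - diag), min=0).sum()
    return neg_cap + neg_img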
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [diag_2, mul, scores_1, sort_1], Original ATen: [aten.diag_embed, aten.mul, aten.sub, aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_diag_embed_mul_sort_sub_0.run(buf0, buf1, buf4, 4, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
triton_per_fused_sort_1.run(buf1, buf2, 4, 4, grid=grid(4), stream=stream0)
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [add, clamp, neg_cap, add_1, clamp_1, neg_img, loss], Original ATen: [aten.add, aten.clamp, aten.sum]
triton_per_fused_add_clamp_sum_2.run(buf8, buf2, buf0, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf2
del buf4
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HardNegativeContrastiveLoss(nn.Module):
def __init__(self, nmax=1, margin=0.2):
super(HardNegativeContrastiveLoss, self).__init__()
self.margin = margin
self.nmax = nmax
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
scores = scores - 2 * torch.diag(scores.diag())
sorted_cap, _ = torch.sort(scores, 0, descending=True)
sorted_img, _ = torch.sort(scores, 1, descending=True)
max_c = sorted_cap[:self.nmax, :]
max_i = sorted_img[:, :self.nmax]
        neg_cap = torch.sum(torch.clamp(max_c + (self.margin - diag).view(
            1, -1).expand_as(max_c), min=0))
        neg_img = torch.sum(torch.clamp(max_i + (self.margin - diag).view(
            -1, 1).expand_as(max_i), min=0))
loss = neg_cap + neg_img
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
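# Minimal usage sketch (illustrative; not part of the original module): with
# nmax=1 the loss penalizes only the single hardest negative per image and
# per caption.
def _usage_sketch():
    loss_fn = HardNegativeContrastiveLoss(nmax=1, margin=0.2)
    imgs, caps = get_inputs()
    return loss_fn(imgs, caps)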
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_diag_embed_mul_sort_sub_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
tmp1 = r1
tmp2 = x0
tmp3 = tmp1 == tmp2
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp1.to(tl.int16)
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp9, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask)
@triton.jit
def triton_per_fused_sort_1(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r1), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (x0 + 4 * r1), tmp5, xmask)
@triton.jit
def triton_per_fused_add_clamp_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
    rindex = tl.arange(0, RBLOCK)[None, :]
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + 5 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = 0.2
tmp3 = tmp2 - tmp1
tmp4 = tmp0 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 + tmp3
tmp12 = triton_helpers.maximum(tmp11, tmp5)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = tmp9 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_diag_embed_mul_sort_sub_0[grid(4)](buf0, buf1,
buf4, 4, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_sort_1[grid(4)](buf1, buf2, 4, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6
del buf6
triton_per_fused_add_clamp_sum_2[grid(1)](buf8, buf2, buf0, buf4, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf2
del buf4
return buf8,
class HardNegativeContrastiveLossNew(nn.Module):
def __init__(self, nmax=1, margin=0.2):
super(HardNegativeContrastiveLossNew, self).__init__()
self.margin = margin
self.nmax = nmax
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
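# Illustrative smoke test (a sketch, not part of the generated module; assumes
# a CUDA device, matching the device='cuda:0' expectations above):
def _smoke_test():
    a = torch.rand(4, 4, device='cuda')
    b = torch.rand(4, 4, device='cuda')
    return HardNegativeContrastiveLossNew()(a, b)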
| debayan/dsve-loc | HardNegativeContrastiveLoss | false | 3,407 | [
"BSD-3-Clause-Clear"
] | 0 | 21b1e1837668b6daa0881514d0756e9bec039fcb | https://github.com/debayan/dsve-loc/tree/21b1e1837668b6daa0881514d0756e9bec039fcb | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, nmax=1, margin=0.2):
super().__init__()
self.margin = margin
self.nmax = nmax
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
scores = scores - 2 * torch.diag(scores.diag())
sorted_cap, _ = torch.sort(scores, 0, descending=True)
sorted_img, _ = torch.sort(scores, 1, descending=True)
max_c = sorted_cap[:self.nmax, :]
max_i = sorted_img[:, :self.nmax]
        neg_cap = torch.sum(torch.clamp(max_c + (self.margin - diag).view(
            1, -1).expand_as(max_c), min=0))
        neg_img = torch.sum(torch.clamp(max_i + (self.margin - diag).view(
            -1, 1).expand_as(max_i), min=0))
loss = neg_cap + neg_img
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
ResidualAttentionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
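# Eager-mode sketch of the statistics computed above (illustrative): per-row
# mean and rsqrt(variance + eps), using the biased variance to match
# aten.var_mean with correction=0.
def _sketch_layer_norm_stats(x, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + eps)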
# kernel path: runs/run_shard_7/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ov/covnoswvamrduqnlpu7ioajrnn3hhte55stjgoorf2z6wjvkkv75.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_3, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/a3/ca3gcd62vddrpdf7wel6bpef44cv6vzsauaaigftsutida5ifj66.py
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# mul => mul_2
# softmax => amax, clone, exp, sub_1, sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, 1.0), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_mul_3 = async_compile.triton('triton_poi_fused__softmax_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (3*x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp15, xmask)
tl.store(out_ptr2 + (x4), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ry/crysvnpehef7t7qavdv4tjejxu2fplwpz5t7ziu4zobu2iuwbxgh.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, clone, div, exp, sub_1, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp7, xmask)
''', device_str='cuda')
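# The softmax is split across the two kernels above (illustrative summary):
# the first pass produces the row-wise max and sum of exponentials, the second
# normalizes with exp(logit - max) / sum. Eager-mode sketch of the stable
# softmax they implement together:
def _sketch_stable_softmax(logits):
    m = logits.max(dim=-1, keepdim=True).values
    e = torch.exp(logits - m)
    return e / e.sum(dim=-1, keepdim=True)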
# kernel path: runs/run_shard_7/inductor_cache/vt/cvtpzj4f2hmyerpchbm5i4eart4tas2pfh5e56t2solbcays5o7r.py
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (3*x1) + (12*x0) + (48*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4k/c4kxxzyxk45cygbwnqkt5vb2udxol67wuotkh6zmuwsinb63uprn.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wy/cwy4kxyjt4etqj66cdnstjhqnyeqdu5ftn4ptejfyamzau7n3xqh.py
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_2 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2f/c2fol3uyk6gyusnfl52autx5hdhf3lnnchrftxvbnmmxfs5li5o4.py
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_4, add_5, mul_5, mul_6, rsqrt_1, sub_2
# x_2 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_6), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_8), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7s/c7sh7255dxwfgfvzmp2g3slml6c3g5jdyjpq3rxua55pu5gg2wfd.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# gelu => add_6, erf, mul_7, mul_8, mul_9
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.5), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_6), kwargs = {})
triton_poi_fused_gelu_9 = async_compile.triton('triton_poi_fused_gelu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
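# The constants above implement exact (erf-based) GELU, where
# 0.7071067811865476 is 1/sqrt(2). Eager-mode sketch (illustrative):
def _sketch_gelu(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))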
# kernel path: runs/run_shard_7/inductor_cache/7n/c7nsuy663pmek7olaj7xnomqpsspowavb2zqwzrjkaz42y7dnrtc.py
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_2
# x_3 => add_3
# x_4 => add_7
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_14), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, grid=grid(64), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf3, primals_5, buf5, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_3.run(buf3, primals_5, buf5, buf4, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf4, buf5, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf3, primals_5, buf9, 64, grid=grid(64), stream=stream0)
del buf3
del primals_5
buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf12, primals_7, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_1, buf12, primals_7, buf13, buf14, primals_8, primals_9, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
triton_poi_fused_gelu_9.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf19, primals_1, buf12, primals_7, primals_13, 64, grid=grid(64), stream=stream0)
del primals_13
return (buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch as th
class LayerNorm(nn.LayerNorm):
"""
Implementation that supports fp16 inputs but fp32 gains/biases.
"""
def forward(self, x: 'th.Tensor'):
return super().forward(x.float())
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
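# Note (editorial): applying `scale` to both q and k splits the usual
# 1/sqrt(attn_ch) softmax temperature across the two operands, so
# (q * scale) @ (k * scale)^T == (q @ k^T) / sqrt(attn_ch) while keeping
# intermediate magnitudes smaller in low precision.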
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
class MLP(nn.Module):
def __init__(self, width):
super().__init__()
self.width = width
self.c_fc = nn.Linear(width, width * 4)
self.c_proj = nn.Linear(width * 4, width)
self.gelu = nn.GELU()
def forward(self, x):
return self.c_proj(self.gelu(self.c_fc(x)))
class ResidualAttentionBlock(nn.Module):
def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
super().__init__()
self.attn = MultiheadAttention(n_ctx, width, heads)
self.ln_1 = LayerNorm(width)
self.mlp = MLP(width)
self.ln_2 = LayerNorm(width)
def forward(self, x: 'th.Tensor'):
x = x + self.attn(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
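# The kernel above computes the LayerNorm statistics for each row of 4
# elements: out_ptr0 holds the mean and out_ptr1 holds rsqrt(var + 1e-05).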
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp15, xmask)
tl.store(out_ptr2 + x4, tmp26, xmask)
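# First softmax pass: stores the scaled query values (out_ptr0) and, for
# each (batch, head, query) row, the maximum of the q*k logits (out_ptr1)
# and the sum of exp(logit - max) (out_ptr2) over the 4 key positions --
# the two reductions of a numerically stable softmax.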
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask)
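# Second softmax pass: recomputes the q*k logits from the stored operands,
# then exp(logit - max) / sum, and writes the normalized attention weights
# in the transposed layout consumed by the batched matmul that follows.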
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
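# The kernel above evaluates the exact (erf-based) GELU used by nn.GELU():
# gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))); 0.7071067811865476 is 1/sqrt(2).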
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
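# Final fused kernel: adds both per-channel projection biases and both
# residual branches in one pass, i.e.
# out = x + (attn_proj + bias_a) + (mlp_proj + bias_m).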
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_mul_2[grid(64)](buf3, primals_5, buf5, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
triton_poi_fused__softmax_mul_3[grid(64)](buf3, primals_5, buf5,
buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf4, buf5, buf6, buf7, buf8,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf7
triton_poi_fused_clone_5[grid(64)](buf3, primals_5, buf9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del primals_5
buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
primals_7, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1, buf12,
primals_7, buf13, buf14, primals_8, primals_9, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_9[grid(256)](buf16, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_10[grid(64)](buf19, primals_1, buf12,
primals_7, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
return buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16,
1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1,
1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), primals_4
class LayerNorm(nn.LayerNorm):
"""
Implementation that supports fp16 inputs but fp32 gains/biases.
"""
def forward(self, x: 'th.Tensor'):
return super().forward(x.float())
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
class MLP(nn.Module):
def __init__(self, width):
super().__init__()
self.width = width
self.c_fc = nn.Linear(width, width * 4)
self.c_proj = nn.Linear(width * 4, width)
self.gelu = nn.GELU()
def forward(self, x):
return self.c_proj(self.gelu(self.c_fc(x)))
class ResidualAttentionBlockNew(nn.Module):
def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
super().__init__()
self.attn = MultiheadAttention(n_ctx, width, heads)
self.ln_1 = LayerNorm(width)
self.mlp = MLP(width)
self.ln_2 = LayerNorm(width)
def forward(self, input_0):
primals_4 = self.attn.c_qkv.weight
primals_5 = self.attn.c_qkv.bias
primals_6 = self.attn.c_proj.weight
primals_2 = self.attn.c_proj.bias
primals_3 = self.ln_1.weight
primals_7 = self.ln_1.bias
primals_10 = self.mlp.c_fc.weight
primals_11 = self.mlp.c_fc.bias
primals_12 = self.mlp.c_proj.weight
primals_8 = self.mlp.c_proj.bias
primals_9 = self.ln_2.weight
primals_13 = self.ln_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| dbanys/glide-text2im | ResidualAttentionBlock | false | 3,408 | [
"MIT"
] | 0 | 5177545ec62f1fddc3075a8a69b63df3eb2256a5 | https://github.com/dbanys/glide-text2im/tree/5177545ec62f1fddc3075a8a69b63df3eb2256a5 | import math
import torch
import torch.nn as nn
import torch as th
class LayerNorm(nn.LayerNorm):
"""
Implementation that supports fp16 inputs but fp32 gains/biases.
"""
def forward(self, x: 'th.Tensor'):
return super().forward(x.float())
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
class MLP(nn.Module):
def __init__(self, width):
super().__init__()
self.width = width
self.c_fc = nn.Linear(width, width * 4)
self.c_proj = nn.Linear(width * 4, width)
self.gelu = nn.GELU()
def forward(self, x):
return self.c_proj(self.gelu(self.c_fc(x)))
class Model(nn.Module):
def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
super().__init__()
self.attn = MultiheadAttention(n_ctx, width, heads)
self.ln_1 = LayerNorm(width)
self.mlp = MLP(width)
self.ln_2 = LayerNorm(width)
def forward(self, x: 'th.Tensor'):
x = x + self.attn(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
MidNet2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dl/cdl5lse44xdzrizubj67ug5s3wmtva33w7qokf7z6zcm6xxeepcr.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [2, 2], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
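# Editorial note: the gt/mul/where decomposition above is the standard
# LeakyReLU lowering; torch.where(y > 0, y, 0.01 * y) equals
# F.leaky_relu(y, negative_slope=0.01), the nn.LeakyReLU() default.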
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rl/crlihktcxlelztj3nlaeiylpet5sp6oio26pbfd7oslmyooxhgan.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1, 1], [2, 2], [2, 2], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf6, primals_7, buf7, buf8, 1048576, grid=grid(1048576), stream=stream0)
del buf6
del primals_7
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf10, primals_9, 1048576, grid=grid(1048576), stream=stream0)
del primals_9
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MidNet2(nn.Module):
def forward(self, x_in):
"""Network with dilation rate 2
:param x_in: input convolutional features
:returns: processed convolutional features
:rtype: Tensor
"""
x = self.lrelu(self.conv1(x_in))
x = self.lrelu(self.conv2(x))
x = self.lrelu(self.conv3(x))
x = self.conv4(x)
return x
def __init__(self, in_channels=16):
"""FIXME! briefly describe function
:param in_channels: Input channels
:returns: N/A
:rtype: N/A
"""
super(MidNet2, self).__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)
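# Editorial usage sketch: with kernel_size=3, stride=1, padding=2 and
# dilation=2 every layer preserves spatial size, so
#   net = MidNet2(in_channels=16)
#   assert net(torch.rand(4, 16, 64, 64)).shape == (4, 64, 64, 64)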
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
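# Besides the activation (out_ptr1), the kernel stores the boolean x > 0
# mask (out_ptr0); the compiled backward pass can reuse it for the
# LeakyReLU gradient instead of recomputing the comparison.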
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf0,
primals_2, buf1, buf2, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf3,
primals_5, buf4, buf5, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf8 = buf3
del buf3
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf6,
primals_7, buf7, buf8, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_1[grid(1048576)](buf10, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf4, buf5, buf7, buf8)
class MidNet2New(nn.Module):
def __init__(self, in_channels=16):
"""FIXME! briefly describe function
:param in_channels: Input channels
:returns: N/A
:rtype: N/A
"""
super(MidNet2New, self).__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| deshwalmahesh/CURL---cpu-gpu | MidNet2 | false | 3,409 | [
"BSD-3-Clause"
] | 0 | f4e87275b6cce556b9e04a188cf7ae13d810d82a | https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a | import torch
import torch.nn as nn
class Model(nn.Module):
def forward(self, x_in):
"""Network with dilation rate 2
:param x_in: input convolutional features
:returns: processed convolutional features
:rtype: Tensor
"""
x = self.lrelu(self.conv1(x_in))
x = self.lrelu(self.conv2(x))
x = self.lrelu(self.conv3(x))
x = self.conv4(x)
return x
def __init__(self, in_channels=16):
"""FIXME! briefly describe function
:param in_channels: Input channels
:returns: N/A
:rtype: N/A
"""
super().__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return []
|
PredictionConvolutions | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wt/cwt5qmtvc7yxu72iuj2iy3dboxfdnpjie7tsbva6mk7drgkwx5ae.py
# Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# locs => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3, %view_4, %view_5], 1), kwargs = {})
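# Editorial note: the segment boundaries in the kernel below (16384, 40960,
# 65536, 90112, 106496, 122880) are the cumulative per-image row counts of
# the six concatenated views: 64*64 locations times 4, 6, 6, 6, 4 and 4
# boxes respectively, giving 122880 rows of 4 offsets per image
# (4 images * 122880 rows * 4 offsets = 1966080 elements = xnumel).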
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1966080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 122880
x0 = xindex % 4
x2 = (xindex // 491520)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16384, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4096*((x0 + (4*x1)) % 16)) + (65536*(((x0 + (4*x1) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*x1)) // 16) % 4096)), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((x0 + (4*x1)) % 16), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 40960, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + ((4096*((x0 + (4*((-16384) + x1))) % 24)) + (98304*(((x0 + (4*((-16384) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-16384) + x1))) // 24) % 4096)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr3 + ((x0 + (4*((-16384) + x1))) % 24), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 65536, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + ((4096*((x0 + (4*((-40960) + x1))) % 24)) + (98304*(((x0 + (4*((-40960) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-40960) + x1))) // 24) % 4096)), tmp22, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr5 + ((x0 + (4*((-40960) + x1))) % 24), tmp22, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 90112, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tmp28 & tmp30
tmp32 = tl.load(in_ptr6 + ((4096*((x0 + (4*((-65536) + x1))) % 24)) + (98304*(((x0 + (4*((-65536) + x1)) + (98304*x2)) // 98304) % 4)) + (((x0 + (4*((-65536) + x1))) // 24) % 4096)), tmp31, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr7 + ((x0 + (4*((-65536) + x1))) % 24), tmp31, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp31, tmp34, tmp35)
tmp37 = tmp0 >= tmp29
tmp38 = tl.full([1], 106496, tl.int64)
tmp39 = tmp0 < tmp38
tmp40 = tmp37 & tmp39
tmp41 = tl.load(in_ptr8 + ((4096*((x0 + (4*((-90112) + x1))) % 16)) + (65536*(((x0 + (4*((-90112) + x1)) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*((-90112) + x1))) // 16) % 4096)), tmp40, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr9 + ((x0 + (4*((-90112) + x1))) % 16), tmp40, eviction_policy='evict_last', other=0.0)
tmp43 = tmp41 + tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp40, tmp43, tmp44)
tmp46 = tmp0 >= tmp38
tmp47 = tl.full([1], 122880, tl.int64)
tmp48 = tmp0 < tmp47
tmp49 = tl.load(in_ptr10 + ((4096*((x0 + (4*((-106496) + x1))) % 16)) + (65536*(((x0 + (4*((-106496) + x1)) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*((-106496) + x1))) // 16) % 4096)), tmp46, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr11 + ((x0 + (4*((-106496) + x1))) % 16), tmp46, eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp46, tmp51, tmp52)
tmp54 = tl.where(tmp40, tmp45, tmp53)
tmp55 = tl.where(tmp31, tmp36, tmp54)
tmp56 = tl.where(tmp22, tmp27, tmp55)
tmp57 = tl.where(tmp13, tmp18, tmp56)
tmp58 = tl.where(tmp4, tmp9, tmp57)
tl.store(out_ptr0 + (x3), tmp58, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args
args.clear()
assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_2, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_5, (24, ), (1, ))
assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_7, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_8, (24, ), (1, ))
assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_10, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (24, ), (1, ))
assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_13, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_14, (16, ), (1, ))
assert_size_stride(primals_15, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_16, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (16, ), (1, ))
assert_size_stride(primals_18, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_19, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_20, (16, ), (1, ))
assert_size_stride(primals_21, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_22, (24, ), (1, ))
assert_size_stride(primals_23, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_24, (24, ), (1, ))
assert_size_stride(primals_25, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_26, (24, ), (1, ))
assert_size_stride(primals_27, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (16, ), (1, ))
assert_size_stride(primals_29, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [l_conv4_3], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [l_conv7], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 24, 64, 64), (98304, 4096, 64, 1))
# Topologically Sorted Source Nodes: [l_conv8_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 24, 64, 64), (98304, 4096, 64, 1))
# Topologically Sorted Source Nodes: [l_conv9_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_12, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 24, 64, 64), (98304, 4096, 64, 1))
# Topologically Sorted Source Nodes: [l_conv10_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_15, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [l_conv11_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv4_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(primals_1, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv7], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(primals_6, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 24, 64, 64), (98304, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv8_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(primals_9, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 24, 64, 64), (98304, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv9_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(primals_12, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 24, 64, 64), (98304, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv10_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(primals_15, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv11_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(primals_18, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf12 = empty_strided_cuda((4, 122880, 4), (491520, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_3, buf1, primals_5, buf2, primals_8, buf3, primals_11, buf4, primals_14, buf5, primals_17, buf12, 1966080, grid=grid(1966080), stream=stream0)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
del primals_11
del primals_14
del primals_17
del primals_3
del primals_5
del primals_8
buf13 = empty_strided_cuda((4, 122880, 4), (491520, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [classes_scores], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf6, primals_20, buf7, primals_22, buf8, primals_24, buf9, primals_26, buf10, primals_28, buf11, primals_30, buf13, 1966080, grid=grid(1966080), stream=stream0)
del buf10
del buf11
del buf6
del buf7
del buf8
del buf9
del primals_20
del primals_22
del primals_24
del primals_26
del primals_28
del primals_30
return (buf12, buf13, primals_1, primals_2, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_13, primals_15, primals_16, primals_18, primals_19, primals_21, primals_23, primals_25, primals_27, primals_29, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
from itertools import product as product
import torch.optim
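def _cxcy_to_gcxgcy_sketch(cxcy, priors_cxcy):
    """Editorial sketch only: the class docstring below points to
    cxcy_to_gcxgcy in utils.py, which is not part of this dump. This follows
    the common SSD formulation (an assumption, not necessarily the repo's
    exact code): center offsets scaled by variance 0.1, log size ratios
    scaled by variance 0.2."""
    return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:,
        2:] / 10), torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], 1)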
class PredictionConvolutions(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
    The class scores represent the scores of each object class in each of the 8732 located bounding boxes.
    A high score for 'background' means no object in the box.
"""
def __init__(self, n_classes):
"""
:param n_classes: number of different types of objects
"""
super(PredictionConvolutions, self).__init__()
self.n_classes = n_classes
n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
'conv10_2': 4, 'conv11_2': 4}
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4,
            kernel_size=3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
kernel_size=3, padding=1)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
kernel_size=3, padding=1)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats,
conv9_2_feats, conv10_2_feats, conv11_2_feats):
"""
Forward propagation.
:param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
:param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
:param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
:param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
:param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
batch_size = conv4_3_feats.size(0)
l_conv4_3 = self.loc_conv4_3(conv4_3_feats)
l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous()
l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)
l_conv7 = self.loc_conv7(conv7_feats)
l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()
l_conv7 = l_conv7.view(batch_size, -1, 4)
l_conv8_2 = self.loc_conv8_2(conv8_2_feats)
l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()
l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)
l_conv9_2 = self.loc_conv9_2(conv9_2_feats)
l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()
l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)
l_conv10_2 = self.loc_conv10_2(conv10_2_feats)
l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous()
l_conv10_2 = l_conv10_2.view(batch_size, -1, 4)
l_conv11_2 = self.loc_conv11_2(conv11_2_feats)
l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous()
l_conv11_2 = l_conv11_2.view(batch_size, -1, 4)
c_conv4_3 = self.cl_conv4_3(conv4_3_feats)
c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous()
c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes)
c_conv7 = self.cl_conv7(conv7_feats)
c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous()
c_conv7 = c_conv7.view(batch_size, -1, self.n_classes)
c_conv8_2 = self.cl_conv8_2(conv8_2_feats)
c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous()
c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes)
c_conv9_2 = self.cl_conv9_2(conv9_2_feats)
c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous()
c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes)
c_conv10_2 = self.cl_conv10_2(conv10_2_feats)
c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous()
c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes)
c_conv11_2 = self.cl_conv11_2(conv11_2_feats)
c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous()
c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes)
locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2,
l_conv10_2, l_conv11_2], dim=1)
classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2,
c_conv9_2, c_conv10_2, c_conv11_2], dim=1)
return locs, classes_scores
def get_inputs():
    return [torch.rand([4, 512, 64, 64]), torch.rand([4, 1024, 64, 64]),
        torch.rand([4, 512, 64, 64]), torch.rand([4, 256, 64, 64]),
        torch.rand([4, 256, 64, 64]), torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {'n_classes': 4}]
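# A minimal CPU smoke test (a sketch; it assumes the PredictionConvolutions
# class and get_inputs() defined above). With six 64x64 feature maps and
# n_boxes = (4, 6, 6, 6, 4, 4), the prior count is
# (4 + 4 + 4 + 6 + 6 + 6) * 64 * 64 = 122880, matching the (4, 122880, 4)
# buffers produced by the compiled call() later in this row.
if __name__ == "__main__":
    model = PredictionConvolutions(n_classes=4)
    locs, scores = model(*get_inputs())
    assert locs.shape == (4, 122880, 4)
    assert scores.shape == (4, 122880, 4)  # n_classes == 4 here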
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn
from itertools import product as product
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
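# Fused concatenation kernel: adds each convolution's bias and scatters the
# six permuted (N, H, W, C) prediction maps into one contiguous
# (4, 122880, 4) output buffer.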
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 122880
x0 = xindex % 4
x2 = xindex // 491520
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16384, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4096 * ((x0 + 4 * x1) % 16) + 65536 * ((x0 +
4 * x1 + 65536 * x2) // 65536 % 4) + (x0 + 4 * x1) // 16 % 4096),
tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1) % 16, tmp4, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 40960, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (4096 * ((x0 + 4 * (-16384 + x1)) % 24) +
98304 * ((x0 + 4 * (-16384 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
4 * (-16384 + x1)) // 24 % 4096), tmp13, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr3 + (x0 + 4 * (-16384 + x1)) % 24, tmp13,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 65536, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (4096 * ((x0 + 4 * (-40960 + x1)) % 24) +
98304 * ((x0 + 4 * (-40960 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
4 * (-40960 + x1)) // 24 % 4096), tmp22, eviction_policy=
'evict_last', other=0.0)
tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-40960 + x1)) % 24, tmp22,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 90112, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tmp28 & tmp30
tmp32 = tl.load(in_ptr6 + (4096 * ((x0 + 4 * (-65536 + x1)) % 24) +
98304 * ((x0 + 4 * (-65536 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
4 * (-65536 + x1)) // 24 % 4096), tmp31, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr7 + (x0 + 4 * (-65536 + x1)) % 24, tmp31,
eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp31, tmp34, tmp35)
tmp37 = tmp0 >= tmp29
tmp38 = tl.full([1], 106496, tl.int64)
tmp39 = tmp0 < tmp38
tmp40 = tmp37 & tmp39
tmp41 = tl.load(in_ptr8 + (4096 * ((x0 + 4 * (-90112 + x1)) % 16) +
65536 * ((x0 + 4 * (-90112 + x1) + 65536 * x2) // 65536 % 4) + (x0 +
4 * (-90112 + x1)) // 16 % 4096), tmp40, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr9 + (x0 + 4 * (-90112 + x1)) % 16, tmp40,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp41 + tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp40, tmp43, tmp44)
tmp46 = tmp0 >= tmp38
tl.full([1], 122880, tl.int64)
tmp49 = tl.load(in_ptr10 + (4096 * ((x0 + 4 * (-106496 + x1)) % 16) +
65536 * ((x0 + 4 * (-106496 + x1) + 65536 * x2) // 65536 % 4) + (x0 +
4 * (-106496 + x1)) // 16 % 4096), tmp46, eviction_policy=
'evict_last', other=0.0)
tmp50 = tl.load(in_ptr11 + (x0 + 4 * (-106496 + x1)) % 16, tmp46,
eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp46, tmp51, tmp52)
tmp54 = tl.where(tmp40, tmp45, tmp53)
tmp55 = tl.where(tmp31, tmp36, tmp54)
tmp56 = tl.where(tmp22, tmp27, tmp55)
tmp57 = tl.where(tmp13, tmp18, tmp56)
tmp58 = tl.where(tmp4, tmp9, tmp57)
tl.store(out_ptr0 + x3, tmp58, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_2, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_5, (24,), (1,))
assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_7, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_8, (24,), (1,))
assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_10, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (24,), (1,))
assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_13, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_14, (16,), (1,))
assert_size_stride(primals_15, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_16, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (16,), (1,))
assert_size_stride(primals_18, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_19, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_20, (16,), (1,))
assert_size_stride(primals_21, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_22, (24,), (1,))
assert_size_stride(primals_23, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_24, (24,), (1,))
assert_size_stride(primals_25, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_26, (24,), (1,))
assert_size_stride(primals_27, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_28, (16,), (1,))
assert_size_stride(primals_29, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf3 = extern_kernels.convolution(primals_12, primals_10, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf4 = extern_kernels.convolution(primals_15, primals_13, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf5 = extern_kernels.convolution(primals_18, primals_16, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf6 = extern_kernels.convolution(primals_1, primals_19, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf7 = extern_kernels.convolution(primals_6, primals_21, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf8 = extern_kernels.convolution(primals_9, primals_23, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf9 = extern_kernels.convolution(primals_12, primals_25, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf10 = extern_kernels.convolution(primals_15, primals_27, stride=(
1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf11 = extern_kernels.convolution(primals_18, primals_29, stride=(
1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf12 = empty_strided_cuda((4, 122880, 4), (491520, 4, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1966080)](buf0, primals_3, buf1,
primals_5, buf2, primals_8, buf3, primals_11, buf4, primals_14,
buf5, primals_17, buf12, 1966080, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
del primals_11
del primals_14
del primals_17
del primals_3
del primals_5
del primals_8
        buf13 = empty_strided_cuda((4, 122880, 4), (491520, 4, 1),
            torch.float32)
triton_poi_fused_cat_0[grid(1966080)](buf6, primals_20, buf7,
primals_22, buf8, primals_24, buf9, primals_26, buf10,
primals_28, buf11, primals_30, buf13, 1966080, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf10
del buf11
del buf6
del buf7
del buf8
del buf9
del primals_20
del primals_22
del primals_24
del primals_26
del primals_28
del primals_30
return (buf12, buf13, primals_1, primals_2, primals_4, primals_6,
primals_7, primals_9, primals_10, primals_12, primals_13,
primals_15, primals_16, primals_18, primals_19, primals_21,
primals_23, primals_25, primals_27, primals_29)
class PredictionConvolutionsNew(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
A high score for 'background' = no object.
"""
def __init__(self, n_classes):
"""
:param n_classes: number of different types of objects
"""
super(PredictionConvolutionsNew, self).__init__()
self.n_classes = n_classes
n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
'conv10_2': 4, 'conv11_2': 4}
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4,
            kernel_size=3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
kernel_size=3, padding=1)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
kernel_size=3, padding=1)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
primals_2 = self.loc_conv4_3.weight
primals_3 = self.loc_conv4_3.bias
primals_4 = self.loc_conv7.weight
primals_5 = self.loc_conv7.bias
primals_7 = self.loc_conv8_2.weight
primals_8 = self.loc_conv8_2.bias
primals_10 = self.loc_conv9_2.weight
primals_11 = self.loc_conv9_2.bias
primals_13 = self.loc_conv10_2.weight
primals_14 = self.loc_conv10_2.bias
primals_16 = self.loc_conv11_2.weight
primals_17 = self.loc_conv11_2.bias
primals_19 = self.cl_conv4_3.weight
primals_20 = self.cl_conv4_3.bias
primals_21 = self.cl_conv7.weight
primals_22 = self.cl_conv7.bias
primals_23 = self.cl_conv8_2.weight
primals_24 = self.cl_conv8_2.bias
primals_25 = self.cl_conv9_2.weight
primals_26 = self.cl_conv9_2.bias
primals_27 = self.cl_conv10_2.weight
primals_28 = self.cl_conv10_2.bias
primals_29 = self.cl_conv11_2.weight
primals_30 = self.cl_conv11_2.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_12 = input_3
primals_15 = input_4
primals_18 = input_5
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0], output[1]
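# PredictionConvolutionsNew mirrors the eager module above but routes the
# weights, biases and the six feature maps straight into the Inductor call(),
# which runs the twelve convolutions via extern_kernels.convolution (bias
# deferred) and fuses the bias-add, permute and concatenation into the two
# triton_poi_fused_cat_0 launches.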
| adityag6994/pytorch_ssd_training | PredictionConvolutions | false | 3,410 | ["MIT"] | 0 | 404f3cbef815e314337ec2c1b4f06a2403a7ce03 | https://github.com/adityag6994/pytorch_ssd_training/tree/404f3cbef815e314337ec2c1b4f06a2403a7ce03 | import torch
import torch.utils.data
from torch import nn
from itertools import product as product
import torch.optim
class Model(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
A high score for 'background' = no object.
"""
def __init__(self, n_classes):
"""
:param n_classes: number of different types of objects
"""
super().__init__()
self.n_classes = n_classes
n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
'conv10_2': 4, 'conv11_2': 4}
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4,
            kernel_size=3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
kernel_size=3, padding=1)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
kernel_size=3, padding=1)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats,
conv9_2_feats, conv10_2_feats, conv11_2_feats):
"""
Forward propagation.
:param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
:param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
:param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
:param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
:param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
batch_size = conv4_3_feats.size(0)
l_conv4_3 = self.loc_conv4_3(conv4_3_feats)
l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous()
l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)
l_conv7 = self.loc_conv7(conv7_feats)
l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()
l_conv7 = l_conv7.view(batch_size, -1, 4)
l_conv8_2 = self.loc_conv8_2(conv8_2_feats)
l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()
l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)
l_conv9_2 = self.loc_conv9_2(conv9_2_feats)
l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()
l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)
# ... truncated (>4000 chars) for memory efficiency |
GIoU_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dh/cdhyccjtifbglt573uuow4zc7jwo4v4pgagqxf6uafhvfldwv5u4.py
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, ones, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, IoU, C_width, C_height, C, sub_11, div_1, GIoU, loss, lt_1, zeros_1, loss_1, sum_1], Original ATen: [aten.max, aten.min, aten.ones_like, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.sum]
# Source node to ATen node mapping:
# Area_o => mul
# Area_t => mul_1
# C => mul_3
# C_height => sub_10
# C_width => sub_9
# GIoU => sub_12
# Inter => mul_2
# Inter_1 => where
# Inter_t => sub_7
# Inter_w => sub_5
# IoU => div_8
# Union => sub_8
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# div_1 => div_9
# loss => sub_13
# loss_1 => where_1
# lt => lt
# lt_1 => lt_1
# max_1 => max_1
# max_2 => max_2
# min_1 => min_1
# min_2 => min_2
# ones => full_default_1
# sub_11 => sub_11
# sub_4 => sub_4
# sub_6 => sub_6
# sum_1 => sum_1
# zeros => full_default
# zeros_1 => full_default_2
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_1, 1), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 1), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_3, 1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %select_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %sub_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %getitem_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %sub_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %sub_7), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%mul_2, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %mul_2), kwargs = {})
# %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %where), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where, %sub_8), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_2), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %getitem_6), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %sub_10), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %sub_8), kwargs = {})
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_11, %mul_3), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_8, %div_9), kwargs = {})
# %sub_13 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default_1, %sub_12), kwargs = {})
# %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%sub_13, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_1, %full_default_2, %sub_13), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0 = async_compile.triton('triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4) % 4
r0 = rindex % 4
r2 = (rindex // 16)
r4 = rindex
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + (64*r2)), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + (64*r2)), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + (64*r2)), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + (64*r2)), None)
tmp0 = r1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1, 1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp23 = tmp22 >= tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp32 = tmp22 < tmp13
tmp33 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = tmp98 * tmp103
tmp110 = tmp99 * tmp104
tmp111 = tmp109 + tmp110
tmp112 = 0.0
tmp113 = tmp108 < tmp112
tmp114 = tl.where(tmp113, tmp112, tmp108)
tmp115 = tmp111 - tmp114
tmp116 = tmp114 / tmp115
tmp117 = tmp101 * tmp106
tmp118 = tmp117 - tmp115
tmp119 = tmp118 / tmp117
tmp120 = tmp116 - tmp119
tmp121 = 1.0
tmp122 = tmp121 - tmp120
tmp123 = tmp122 < tmp112
tmp124 = tl.where(tmp123, tmp112, tmp122)
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = tl.sum(tmp125, 1)[:, None]
tl.store(out_ptr5 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp127, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf7 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, ones, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, IoU, C_width, C_height, C, sub_11, div_1, GIoU, loss, lt_1, zeros_1, loss_1, sum_1], Original ATen: [aten.max, aten.min, aten.ones_like, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0.run(arg0_1, arg1_1, buf7, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
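# Boxes are in centre-size format along dim 1: (cx, cy, w, h). The helper
# relies on the 1-D identity overlap = w_o + w_t - (extent of the union),
# which goes negative for disjoint boxes and is clamped to zero below; the
# x/y extrema it returns also describe the smallest enclosing box, reused by
# GIoU_loss for the C term.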
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
class GIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
GIoU = IoU.sub(torch.div(C.sub(Union), C))
ones = torch.ones_like(GIoU)
loss = ones.sub(GIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
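# Per box: GIoU = IoU - (C - Union) / C, where C is the area of the smallest
# axis-aligned box enclosing both inputs, so GIoU lies in (-1, 1]; the loss
# sums max(0, 1 - GIoU) over all boxes.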
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
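# A quick exactness check (a sketch; assumes the GIoU_loss class above).
# Two unit boxes in (cx, cy, w, h) format that just touch: intersection 0,
# union 2, enclosing box C = 2, so GIoU = 0 and the loss is exactly 1.
if __name__ == "__main__":
    a = torch.tensor([[0.5, 0.5, 1.0, 1.0]])
    b = torch.tensor([[1.5, 0.5, 1.0, 1.0]])  # a shifted right by 1
    assert GIoU_loss()(a, b).item() == 1.0
    assert GIoU_loss()(a, a).item() == 0.0  # identical boxes: GIoU = 1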
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
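# Fused persistent reduction: rebuilds x/y extents from the centre-size
# boxes, forms intersection, union and enclosing-box areas, and reduces the
# clamped 1 - GIoU loss over all 64 boxes to a single scalar.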
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0(
in_ptr0, in_ptr1, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r2 = rindex // 16
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None)
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK,
RBLOCK]), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = tmp98 * tmp103
tmp110 = tmp99 * tmp104
tmp111 = tmp109 + tmp110
tmp112 = 0.0
tmp113 = tmp108 < tmp112
tmp114 = tl.where(tmp113, tmp112, tmp108)
tmp115 = tmp111 - tmp114
tmp116 = tmp114 / tmp115
tmp117 = tmp101 * tmp106
tmp118 = tmp117 - tmp115
tmp119 = tmp118 / tmp117
tmp120 = tmp116 - tmp119
tmp121 = 1.0
tmp122 = tmp121 - tmp120
tmp123 = tmp122 < tmp112
tmp124 = tl.where(tmp123, tmp112, tmp122)
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = tl.sum(tmp125, 1)[:, None]
tl.store(out_ptr5 + tl.full([XBLOCK, 1], 0, tl.int32), tmp127, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf7 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0[
grid(1)](arg0_1, arg1_1, buf7, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf7,
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
class GIoU_lossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
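# GIoU_lossNew wraps the single fused kernel above: one Triton launch
# computes intersection, union and enclosing-box areas and sums the clamped
# 1 - GIoU loss for all 64 boxes into the scalar buf7, replacing the whole
# eager graph in GIoU_loss.forward.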
| debrouchovea/ReproduceGoturn | GIoU_loss | false | 3,411 | ["MIT"] | 0 | d60f13c781ca612cacc17536530bbee989bdfa45 | https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45 | import torch
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
GIoU = IoU.sub(torch.div(C.sub(Union), C))
ones = torch.ones_like(GIoU)
loss = ones.sub(GIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
IoU_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/c7/cc7a7iruneqpizfdjr2uqyvumr3ou2fszkvkkj5awejd35ivqfwf.py
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, loss, loss_1, lt_1, zeros_1, loss_2, sum_1], Original ATen: [aten.max, aten.min, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.rsub, aten.sum]
# Source node to ATen node mapping:
# Area_o => mul
# Area_t => mul_1
# Inter => mul_2
# Inter_1 => where
# Inter_t => sub_7
# Inter_w => sub_5
# Union => sub_8
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# loss => div_8
# loss_1 => sub_9
# loss_2 => where_1
# lt => lt
# lt_1 => lt_1
# max_1 => max_1
# max_2 => max_2
# min_1 => min_1
# min_2 => min_2
# sub_4 => sub_4
# sub_6 => sub_6
# sum_1 => sum_1
# zeros => full_default
# zeros_1 => full_default_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_1, 1), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 1), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_3, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %select_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %sub_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %getitem_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %sub_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %sub_7), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%mul_2, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %mul_2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %where), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where, %sub_8), kwargs = {})
# %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_8), kwargs = {})
# %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%sub_9, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_1, %full_default_1, %sub_9), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
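# In short, the fused graph computes loss = sum(max(0, 1 - Inter / Union)),
# i.e. the plain IoU loss without the enclosing-box correction used by
# GIoU_loss above.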
triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0 = async_compile.triton('triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4) % 4
r0 = rindex % 4
r2 = (rindex // 16)
r4 = rindex
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + (64*r2)), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + (64*r2)), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + (64*r2)), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + (64*r2)), None)
tmp0 = r1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1, 1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp23 = tmp22 >= tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp32 = tmp22 < tmp13
tmp33 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp119 = tmp118 < tmp109
tmp120 = tl.where(tmp119, tmp109, tmp118)
tmp121 = tl.broadcast_to(tmp120, [XBLOCK, RBLOCK])
tmp123 = tl.sum(tmp121, 1)[:, None]
tl.store(out_ptr3 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp123, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, loss, loss_1, lt_1, zeros_1, loss_2, sum_1], Original ATen: [aten.max, aten.min, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.rsub, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0.run(arg0_1, arg1_1, buf6, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
def Interction_Union(outputs, targets):
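    """Return the intersection, union and smallest-enclosing-box bounds of
    two batches of axis-aligned boxes given in (cx, cy, w, h) format."""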
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
class IoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, _, _, _, _ = Interction_Union(outputs, targets)
zeros = torch.zeros_like(Inter)
loss = torch.div(Inter, Union)
loss = 1 - loss
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0(
in_ptr0, in_ptr1, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
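    # Fused IoU loss over (cx, cy, w, h) boxes: in_ptr0 = outputs, in_ptr1 =
    # targets, both (4, 4, 4, 4); one persistent program reduces all 64
    # elements and writes sum(where(1 - Inter/Union < 0, 0, 1 - Inter/Union))
    # to out_ptr3. The bare tl.full(...)/arithmetic statements below are dead
    # expressions left behind when unused intermediates were stripped.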
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r2 = rindex // 16
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None)
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK,
RBLOCK]), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
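    # tmp40/tmp55 hold x_max/x_min and tmp82/tmp97 hold y_max/y_min of the
    # stacked box extents (centre +/- half extent); the epilogue below forms
    # Inter_w * Inter_t, clamps it at zero, and reduces 1 - Inter/Union.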
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp119 = tmp118 < tmp109
tmp120 = tl.where(tmp119, tmp109, tmp118)
tmp121 = tl.broadcast_to(tmp120, [XBLOCK, RBLOCK])
tmp123 = tl.sum(tmp121, 1)[:, None]
tl.store(out_ptr3 + tl.full([XBLOCK, 1], 0, tl.int32), tmp123, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0[
grid(1)](arg0_1, arg1_1, buf6, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf6,
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
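# Same interface as the eager IoU_loss, but forward dispatches to the fused
# Triton kernel through call(); Interction_Union above is retained for
# reference and is not used by the compiled path.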
class IoU_lossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
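# A minimal sanity check (assumptions: a CUDA device and the eager IoU_loss
# from the source module in scope):
#
#     a = torch.rand(4, 4, 4, 4, device='cuda')
#     b = torch.rand(4, 4, 4, 4, device='cuda')
#     fused = IoU_lossNew()(a, b)   # compiled path above
#     eager = IoU_loss()(a, b)      # reference eager path
#     assert torch.allclose(fused, eager, atol=1e-5)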
| debrouchovea/ReproduceGoturn | IoU_loss | false | 3,412 | [
"MIT"
] | 0 | d60f13c781ca612cacc17536530bbee989bdfa45 | https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45 | import torch
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, _, _, _, _ = Interction_Union(outputs, targets)
zeros = torch.zeros_like(Inter)
loss = torch.div(Inter, Union)
loss = 1 - loss
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GRUCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3f/c3f2pegqor4vp2nrxylvkwrxjdwn3yafpwxpmgbezlypbttqgwnr.py
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# hy => add_3
# inputgate => sigmoid_1
# mul => mul
# mul_1 => mul_1
# newgate => tanh
# resetgate => sigmoid
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, %getitem_4), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {})
# %tanh : [num_users=3] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %tanh), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, %mul_1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sub_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sub_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sub_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask)
tmp19 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp20 = tmp19 - tmp18
tmp21 = tmp5 * tmp20
tmp22 = tmp18 + tmp21
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp18, xmask)
tl.store(out_ptr3 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate_h], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0.run(buf0, primals_3, buf1, primals_6, buf3, buf2, buf4, buf5, 16, grid=grid(16), stream=stream0)
del buf0
del primals_3
return (buf5, primals_6, primals_1, reinterpret_tensor(buf1, (4, 4), (12, 1), 8), buf2, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
from torch import nn
class GRUCell(nn.Module):
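    """Single-step GRU cell; `inputgate` plays the role of the update gate
    z, so hy = n + z * (h - n) matches the standard GRU formulation."""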
def __init__(self, input_size, hidden_size, bias=True):
super(GRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, hidden):
x = x.view(-1, x.size(-1))
gate_x = self.x2h(x)
gate_h = self.h2h(hidden)
gate_x = gate_x.squeeze()
gate_h = gate_h.squeeze()
i_r, i_i, i_n = gate_x.chunk(3, 1)
h_r, h_i, h_n = gate_h.chunk(3, 1)
        resetgate = torch.sigmoid(i_r + h_r)
        inputgate = torch.sigmoid(i_i + h_i)
        newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.
constexpr):
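    # Fused pointwise stage of the GRU cell: in_ptr0 = x @ W_x2h.T (bias not
    # yet added), in_ptr1 = x2h bias, in_ptr2 = h @ W_h2h.T + h2h bias,
    # in_ptr3 = previous hidden state. Columns [0:4)/[4:8)/[8:12) of the
    # 12-wide gate buffers are the reset/input/new chunks; the kernel writes
    # inputgate (out_ptr0), resetgate (out_ptr1), newgate (out_ptr2) and
    # hy = newgate + inputgate * (hidden - newgate) (out_ptr3).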
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp19 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp20 = tmp19 - tmp18
tmp21 = tmp5 * tmp20
tmp22 = tmp18 + tmp21
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
tl.store(out_ptr3 + x2, tmp22, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12),
(1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(16)](buf0,
primals_3, buf1, primals_6, buf3, buf2, buf4, buf5, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf0
del primals_3
return buf5, primals_6, primals_1, reinterpret_tensor(buf1, (4, 4), (12,
1), 8), buf2, buf3, buf4
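# GRUCellNew keeps GRUCell's parameters and initialisation but routes
# forward through the compiled Inductor graph above.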
class GRUCellNew(nn.Module):
def __init__(self, input_size, hidden_size, bias=True):
super(GRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, input_0, input_1):
primals_2 = self.x2h.weight
primals_3 = self.x2h.bias
primals_4 = self.h2h.weight
primals_5 = self.h2h.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
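# A minimal smoke test (assumption: a CUDA device is available):
#
#     cell = GRUCellNew(4, 4).cuda()
#     x = torch.rand(4, 4, device='cuda')
#     h = torch.rand(4, 4, device='cuda')
#     hy = cell(x, h)   # runs the fused kernel above
#     assert hy.shape == (4, 4)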
| deutschmn/PM2.5-GNN | GRUCell | false | 3,413 | [
"MIT"
] | 0 | 82e3fe2f25465451cbbdd6350c91a0242ecaa1c1 | https://github.com/deutschmn/PM2.5-GNN/tree/82e3fe2f25465451cbbdd6350c91a0242ecaa1c1 | import torch
import numpy as np
import torch.nn.functional as F
from torch import nn
class Model(nn.Module):
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, hidden):
x = x.view(-1, x.size(-1))
gate_x = self.x2h(x)
gate_h = self.h2h(hidden)
gate_x = gate_x.squeeze()
gate_h = gate_h.squeeze()
i_r, i_i, i_n = gate_x.chunk(3, 1)
h_r, h_i, h_n = gate_h.chunk(3, 1)
        resetgate = torch.sigmoid(i_r + h_r)
        inputgate = torch.sigmoid(i_i + h_i)
        newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
CIoU_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/uf/cufpfq4fn4ymxbg23zimzojpzuc36hazs7ihgqaw2ri4yolnhwnf.py
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, max_3, min_3, max_4, min_4, ones, Area_o_1, Area_t_1, add_13, add_11, sub_13, Inter_w_1, add_12, sub_15, Inter_t_1, Inter_2, lt_1, zeros_1, Inter_3, Union_1, IoU_1, sub_22, sub_20, pow_1, sub_21, pow_2, dis, C_width, C_height, C, pow_3, R_DIoU, loss, lt_2, zeros_2, loss_1, loss_2, div_3, arctan, div_4, arctan_1, sub_23, pow_4, mul_7, v, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, IoU, sub_24, add_16, alpha, R_CIoU, loss_3, lt_3, zeros_3, loss_4, sum_2], Original ATen: [aten.max, aten.min, aten.ones_like, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.pow, aten.sum, aten.atan]
# Source node to ATen node mapping:
# Area_o => mul
# Area_o_1 => mul_3
# Area_t => mul_1
# Area_t_1 => mul_4
# C => mul_6
# C_height => sub_19
# C_width => sub_18
# Inter => mul_2
# Inter_1 => where
# Inter_2 => mul_5
# Inter_3 => where_1
# Inter_t => sub_7
# Inter_t_1 => sub_16
# Inter_w => sub_5
# Inter_w_1 => sub_14
# IoU => div_8
# IoU_1 => div_17
# R_CIoU => mul_8
# R_DIoU => div_18
# Union => sub_8
# Union_1 => sub_17
# add_11 => add_11
# add_12 => add_12
# add_13 => add_13
# add_16 => add_16
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# alpha => div_22
# arctan => atan
# arctan_1 => atan_1
# dis => add_14
# div_3 => div_19
# div_4 => div_20
# loss => add_15
# loss_1 => where_2
# loss_2 => sum_1
# loss_3 => add_17
# loss_4 => where_3
# lt => lt
# lt_1 => lt_1
# lt_2 => lt_2
# lt_3 => lt_3
# max_1 => max_1
# max_2 => max_2
# max_3 => max_3
# max_4 => max_4
# min_1 => min_1
# min_2 => min_2
# min_3 => min_3
# min_4 => min_4
# mul_7 => mul_7
# ones => full_default_2
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# sub_13 => sub_13
# sub_15 => sub_15
# sub_20 => sub_20
# sub_21 => sub_21
# sub_22 => sub_22
# sub_23 => sub_23
# sub_24 => sub_24
# sub_4 => sub_4
# sub_6 => sub_6
# sum_2 => sum_2
# v => div_21
# zeros => full_default
# zeros_1 => full_default_1
# zeros_2 => full_default_3
# zeros_3 => full_default_4
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_1, 1), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 1), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_3, 1), kwargs = {})
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_4, 1), kwargs = {})
# %min_3 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_5, 1), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_6, 1), kwargs = {})
# %min_4 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_7, 1), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_20, %select_22), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_21, %select_23), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_20, %select_21), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_8, %getitem_10), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_11, %sub_13), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_22, %select_23), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_12, %getitem_14), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %sub_15), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %sub_16), kwargs = {})
# %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%mul_5, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt_1, %full_default_1, %mul_5), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %where_1), kwargs = {})
# %div_17 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where_1, %sub_17), kwargs = {})
# %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default_2, %div_17), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_40, %select_42), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_20, 2), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_41, %select_43), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_21, 2), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_8, %getitem_10), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_12, %getitem_14), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_19), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_6, 2), kwargs = {})
# %div_18 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_14, %pow_3), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_22, %div_18), kwargs = {})
# %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_15, 0), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_2, %full_default_3, %add_15), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_2,), kwargs = {})
# %div_19 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_45, %select_47), kwargs = {})
# %atan : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%div_19,), kwargs = {})
# %div_20 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_44, %select_46), kwargs = {})
# %atan_1 : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%div_20,), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%atan, %atan_1), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_23, 2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_4, 4), kwargs = {})
# %div_21 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_7, 9.869604401089358), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %select_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %sub_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %getitem_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %sub_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %sub_7), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%mul_2, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %mul_2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %where), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where, %sub_8), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_21, %div_8), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_24, 1), kwargs = {})
# %div_22 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, %add_16), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_22, %div_21), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_8), kwargs = {})
# %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_17, 0), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_3, %full_default_4, %add_17), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_3,), kwargs = {})
triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0 = async_compile.triton('triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr8, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4) % 4
r0 = rindex % 4
r2 = (rindex // 16)
r4 = rindex
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + (64*r2)), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + (64*r2)), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + (64*r2)), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + (64*r2)), None)
tmp119 = tl.load(in_ptr0 + (r3 + (64*r2)), None)
tmp120 = tl.load(in_ptr1 + (r3 + (64*r2)), None)
tmp123 = tl.load(in_ptr0 + (16 + r3 + (64*r2)), None)
tmp124 = tl.load(in_ptr1 + (16 + r3 + (64*r2)), None)
tmp0 = r1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1, 1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp23 = tmp22 >= tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp32 = tmp22 < tmp13
tmp33 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp121 = tmp119 - tmp120
tmp122 = tmp121 * tmp121
tmp125 = tmp123 - tmp124
tmp126 = tmp125 * tmp125
tmp127 = tmp122 + tmp126
tmp128 = tmp101 * tmp106
tmp129 = tmp128 * tmp128
tmp130 = tmp127 / tmp129
tmp131 = tmp118 + tmp130
tmp132 = tmp131 < tmp109
tmp133 = tl.where(tmp132, tmp109, tmp131)
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = tl.sum(tmp134, 1)[:, None]
tmp137 = tmp99 / tmp104
tmp138 = libdevice.atan(tmp137)
tmp139 = tmp98 / tmp103
tmp140 = libdevice.atan(tmp139)
tmp141 = tmp138 - tmp140
tmp142 = tmp141 * tmp141
tmp143 = 4.0
tmp144 = tmp142 * tmp143
tmp145 = 0.10132118364233778
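    # 0.10132118... = 1 / pi**2, so tmp146 equals
    # v = (4 / pi**2) * (atan(w_t / h_t) - atan(w_o / h_o))**2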
tmp146 = tmp144 * tmp145
tmp147 = tmp146 - tmp116
tmp148 = tmp147 + tmp117
tmp149 = tmp146 / tmp148
tmp150 = tmp149 * tmp146
tmp151 = tmp136 + tmp150
tmp152 = tmp151 < tmp109
tmp153 = tl.where(tmp152, tmp109, tmp151)
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = tl.sum(tmp154, 1)[:, None]
tl.store(out_ptr8 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp156, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf13 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, max_3, min_3, max_4, min_4, ones, Area_o_1, Area_t_1, add_13, add_11, sub_13, Inter_w_1, add_12, sub_15, Inter_t_1, Inter_2, lt_1, zeros_1, Inter_3, Union_1, IoU_1, sub_22, sub_20, pow_1, sub_21, pow_2, dis, C_width, C_height, C, pow_3, R_DIoU, loss, lt_2, zeros_2, loss_1, loss_2, div_3, arctan, div_4, arctan_1, sub_23, pow_4, mul_7, v, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, IoU, sub_24, add_16, alpha, R_CIoU, loss_3, lt_3, zeros_3, loss_4, sum_2], Original ATen: [aten.max, aten.min, aten.ones_like, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.pow, aten.sum, aten.atan]
stream0 = get_raw_stream(0)
triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0.run(arg0_1, arg1_1, buf13, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
x_o, y_o, x_t, y_t = Center_points(outputs, targets)
dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2))
R_DIoU = torch.div(dis, torch.pow(C, 2))
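        # Note: the original DIoU penalty normalises by the squared diagonal
        # of the enclosing box; here C = C_width * C_height, so the
        # denominator is the squared enclosing-box *area* instead.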
ones = torch.ones_like(IoU)
loss = torch.add(ones.sub(IoU), R_DIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
class CIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, _x_max, _x_min, _y_max, _y_min = Interction_Union(outputs
, targets)
IoU = torch.div(Inter, Union)
loss_DIoU = DIoU_loss()
loss = loss_DIoU(outputs, targets)
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
v = torch.pow(torch.arctan(torch.div(width_t, height_t)).sub(torch.
arctan(torch.div(width_o, height_o))), 2) * 4 / (np.pi * np.pi)
alpha = torch.div(v, 1 + v.sub(IoU))
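        # alpha = v / ((1 - IoU) + v), the CIoU trade-off weight.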
R_CIoU = torch.mul(alpha, v)
loss = torch.add(loss, R_CIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0(
in_ptr0, in_ptr1, out_ptr8, xnumel, rnumel, XBLOCK: tl.constexpr):
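    # Fused CIoU loss over (cx, cy, w, h) boxes: computes the clamped DIoU
    # sum (1 - IoU + dis / C**2), broadcasts it against the per-box
    # aspect-ratio penalty alpha * v, clamps and reduces again, and writes
    # the resulting scalar to out_ptr8.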
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r2 = rindex // 16
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None)
tmp119 = tl.load(in_ptr0 + (r3 + 64 * r2), None)
tmp120 = tl.load(in_ptr1 + (r3 + 64 * r2), None)
tmp123 = tl.load(in_ptr0 + (16 + r3 + 64 * r2), None)
tmp124 = tl.load(in_ptr1 + (16 + r3 + 64 * r2), None)
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK,
RBLOCK]), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp121 = tmp119 - tmp120
tmp122 = tmp121 * tmp121
tmp125 = tmp123 - tmp124
tmp126 = tmp125 * tmp125
tmp127 = tmp122 + tmp126
tmp128 = tmp101 * tmp106
tmp129 = tmp128 * tmp128
tmp130 = tmp127 / tmp129
tmp131 = tmp118 + tmp130
tmp132 = tmp131 < tmp109
tmp133 = tl.where(tmp132, tmp109, tmp131)
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = tl.sum(tmp134, 1)[:, None]
tmp137 = tmp99 / tmp104
tmp138 = libdevice.atan(tmp137)
tmp139 = tmp98 / tmp103
tmp140 = libdevice.atan(tmp139)
tmp141 = tmp138 - tmp140
tmp142 = tmp141 * tmp141
tmp143 = 4.0
tmp144 = tmp142 * tmp143
tmp145 = 0.10132118364233778
tmp146 = tmp144 * tmp145
tmp147 = tmp146 - tmp116
tmp148 = tmp147 + tmp117
tmp149 = tmp146 / tmp148
tmp150 = tmp149 * tmp146
tmp151 = tmp136 + tmp150
tmp152 = tmp151 < tmp109
tmp153 = tl.where(tmp152, tmp109, tmp151)
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = tl.sum(tmp154, 1)[:, None]
tl.store(out_ptr8 + tl.full([XBLOCK, 1], 0, tl.int32), tmp156, None)
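# Note (added commentary): this single fused reduction evaluates the whole
# DIoU + CIoU pipeline -- enclosing-box max/min, intersection, union, center
# distance, and the arctan aspect term (0.10132118364233778 is 1 / pi**2) --
# over the 64 box entries and writes one scalar to out_ptr8.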
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf13 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0[
grid(1)](arg0_1, arg1_1, buf13, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf13,
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
x_o, y_o, x_t, y_t = Center_points(outputs, targets)
dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2))
R_DIoU = torch.div(dis, torch.pow(C, 2))
ones = torch.ones_like(IoU)
loss = torch.add(ones.sub(IoU), R_DIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
class CIoU_lossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| debrouchovea/ReproduceGoturn | CIoU_loss | false | 3,414 | [
"MIT"
] | 0 | d60f13c781ca612cacc17536530bbee989bdfa45 | https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45 | import torch
import numpy as np
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
x_o, y_o, x_t, y_t = Center_points(outputs, targets)
dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2))
R_DIoU = torch.div(dis, torch.pow(C, 2))
ones = torch.ones_like(IoU)
loss = torch.add(ones.sub(IoU), R_DIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
        Inter, Union, _x_max, _x_min, _y_max, _y_min = Interction_Union(
            outputs, targets)
IoU = torch.div(Inter, Union)
loss_DIoU = DIoU_loss()
loss = loss_DIoU(outputs, targets)
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
        v = torch.pow(torch.arctan(torch.div(width_t, height_t)).sub(
            torch.arctan(torch.div(width_o, height_o))), 2) * 4 / (np.pi * np.pi)
alpha = torch.div(v, 1 + v.sub(IoU))
R_CIoU = torch.mul(alpha, v)
loss = torch.add(loss, R_CIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MidNet4 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dl/cdl5lse44xdzrizubj67ug5s3wmtva33w7qokf7z6zcm6xxeepcr.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [4, 4], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rl/crlihktcxlelztj3nlaeiylpet5sp6oio26pbfd7oslmyooxhgan.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1, 1], [4, 4], [4, 4], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf6, primals_7, buf7, buf8, 1048576, grid=grid(1048576), stream=stream0)
del buf6
del primals_7
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf10, primals_9, 1048576, grid=grid(1048576), stream=stream0)
del primals_9
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MidNet4(nn.Module):
def forward(self, x_in):
"""Network with dilation rate 4
:param x_in: input convolutional features
:returns: processed convolutional features
:rtype: Tensor
"""
x = self.lrelu(self.conv1(x_in))
x = self.lrelu(self.conv2(x))
x = self.lrelu(self.conv3(x))
x = self.conv4(x)
return x
def __init__(self, in_channels=16):
"""FIXME! briefly describe function
:param in_channels: Input channels
:returns: N/A
:rtype: N/A
"""
super(MidNet4, self).__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4)
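# A minimal usage sketch (added for illustration, not from the source repo):
# with kernel 3, padding 4 and dilation 4, every conv keeps the spatial size,
# so a (N, 16, 64, 64) input maps to (N, 64, 64, 64).
if __name__ == "__main__":
    net = MidNet4()
    print(net(torch.rand(4, 16, 64, 64)).shape)  # torch.Size([4, 64, 64, 64])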
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf0,
primals_2, buf1, buf2, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf3,
primals_5, buf4, buf5, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf8 = buf3
del buf3
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf6,
primals_7, buf7, buf8, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_1[grid(1048576)](buf10, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf4, buf5, buf7, buf8)
class MidNet4New(nn.Module):
def __init__(self, in_channels=16):
"""FIXME! briefly describe function
:param in_channels: Input channels
:returns: N/A
:rtype: N/A
"""
super(MidNet4New, self).__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| deshwalmahesh/CURL---cpu-gpu | MidNet4 | false | 3,415 | [
"BSD-3-Clause"
] | 0 | f4e87275b6cce556b9e04a188cf7ae13d810d82a | https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a | import torch
import torch.nn as nn
class Model(nn.Module):
def forward(self, x_in):
"""Network with dilation rate 4
:param x_in: input convolutional features
:returns: processed convolutional features
:rtype: Tensor
"""
x = self.lrelu(self.conv1(x_in))
x = self.lrelu(self.conv2(x))
x = self.lrelu(self.conv3(x))
x = self.conv4(x)
return x
def __init__(self, in_channels=16):
"""FIXME! briefly describe function
:param in_channels: Input channels
:returns: N/A
:rtype: N/A
"""
super().__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4)
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return []
|
ParallelLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jv/cjvcsredzlnp5p23u3wgkkflope6kvqewy3nepikau7sddqcldfj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %unsqueeze), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class ParallelLinear(nn.Module):
def __init__(self, n_parallel, in_features, out_features, act=None,
random_bias=False):
super().__init__()
self.act = act
self.weight = nn.Parameter(torch.Tensor(n_parallel, in_features,
out_features))
self.bias = nn.Parameter(torch.Tensor(n_parallel, out_features))
with torch.no_grad():
self.weight.normal_(0.0, np.sqrt(2.0 / in_features))
if random_bias:
self.bias.normal_(0.0, np.sqrt(2.0 / in_features))
else:
self.bias.zero_()
def forward(self, x):
x = torch.bmm(x, self.weight) + self.bias[:, None, :]
if self.act:
x = self.act(x)
return x
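# A minimal usage sketch (added for illustration, not from the source repo):
# one bmm applies n_parallel independent linear maps, so the input is
# (n_parallel, batch, in_features) and the output (n_parallel, batch, out_features).
if __name__ == "__main__":
    layer = ParallelLinear(n_parallel=4, in_features=4, out_features=4,
        act=torch.relu)
    print(layer(torch.rand(4, 4, 4)).shape)  # torch.Size([4, 4, 4])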
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_parallel': 4, 'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask,
        eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0)
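# Note (added commentary): the batched matmul stays an extern cuBLAS bmm; only
# the broadcast bias add is fused into the Triton kernel above, and the
# transposed input view is returned as a saved tensor for the backward pass.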
class ParallelLinearNew(nn.Module):
def __init__(self, n_parallel, in_features, out_features, act=None,
random_bias=False):
super().__init__()
self.act = act
self.weight = nn.Parameter(torch.Tensor(n_parallel, in_features,
out_features))
self.bias = nn.Parameter(torch.Tensor(n_parallel, out_features))
with torch.no_grad():
self.weight.normal_(0.0, np.sqrt(2.0 / in_features))
if random_bias:
self.bias.normal_(0.0, np.sqrt(2.0 / in_features))
else:
self.bias.zero_()
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| dholzmueller/nn_inconsistency | ParallelLinear | false | 3,416 | [
"Apache-2.0"
] | 0 | 67954d71cdbbc61fda7da1f624c19985b0e51708 | https://github.com/dholzmueller/nn_inconsistency/tree/67954d71cdbbc61fda7da1f624c19985b0e51708 | import torch
import numpy as np
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_parallel, in_features, out_features, act=None,
random_bias=False):
super().__init__()
self.act = act
self.weight = nn.Parameter(torch.Tensor(n_parallel, in_features,
out_features))
self.bias = nn.Parameter(torch.Tensor(n_parallel, out_features))
with torch.no_grad():
self.weight.normal_(0.0, np.sqrt(2.0 / in_features))
if random_bias:
self.bias.normal_(0.0, np.sqrt(2.0 / in_features))
else:
self.bias.zero_()
def forward(self, x):
x = torch.bmm(x, self.weight) + self.bias[:, None, :]
if self.act:
x = self.act(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CPUForgetMult | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/2s/c2srlvu3ig3l4rt4br537nskxrm55ugw7myg2outhdk64rd63rae.py
# Topologically Sorted Source Nodes: [sub_1, mul_2, h_3, stack], Original ATen: [aten.rsub, aten.mul, aten.add, aten.stack]
# Source node to ATen node mapping:
# h_3 => add_1
# mul_2 => mul_2
# stack => cat
# sub_1 => sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, %mul_2), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3],), kwargs = {})
triton_poi_fused_add_mul_rsub_stack_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_stack_0(in_ptr0, in_ptr1, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp1 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp6 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr0 + (x0), xmask)
tmp10 = tl.load(in_ptr1 + (x0), xmask)
tmp16 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp17 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp7 = tmp5 * tmp6
tmp8 = tmp3 - tmp5
tmp11 = tmp9 * tmp10
tmp12 = tmp8 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp4 * tmp13
tmp15 = tmp2 + tmp14
tmp18 = tmp16 * tmp17
tmp19 = tmp3 - tmp16
tmp20 = tmp19 * tmp15
tmp21 = tmp18 + tmp20
tl.store(out_ptr1 + (x0), tmp13, xmask)
tl.store(out_ptr2 + (x0), tmp11, xmask)
tl.store(out_ptr3 + (x0), tmp15, xmask)
tl.store(out_ptr4 + (x0), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 64) # alias
buf1 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) # alias
buf3 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 128) # alias
buf4 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 192) # alias
# Topologically Sorted Source Nodes: [sub_1, mul_2, h_3, stack], Original ATen: [aten.rsub, aten.mul, aten.add, aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_stack_0.run(arg0_1, arg1_1, buf2, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class CPUForgetMult(torch.nn.Module):
def __init__(self):
super(CPUForgetMult, self).__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)
prev_h = hidden_init
for i, h in enumerate((f * x).split(1, dim=0)):
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
h = h.view(h.size()[1:])
result.append(h)
prev_h = h
return torch.stack(result)
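# A minimal usage sketch (added for illustration, not from the source repo):
# along dim 0 the module computes h_t = f_t * x_t + (1 - f_t) * h_{t-1},
# with h_0 = hidden_init (the first step reduces to f_1 * x_1 when it is None).
if __name__ == "__main__":
    fm = CPUForgetMult()
    f = torch.sigmoid(torch.randn(4, 4, 4, 4))  # forget gates in (0, 1)
    print(fm(f, torch.randn(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 4])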
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_stack_0(in_ptr0, in_ptr1, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp1 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp6 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr0 + x0, xmask)
tmp10 = tl.load(in_ptr1 + x0, xmask)
tmp16 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp17 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp7 = tmp5 * tmp6
tmp8 = tmp3 - tmp5
tmp11 = tmp9 * tmp10
tmp12 = tmp8 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp4 * tmp13
tmp15 = tmp2 + tmp14
tmp18 = tmp16 * tmp17
tmp19 = tmp3 - tmp16
tmp20 = tmp19 * tmp15
tmp21 = tmp18 + tmp20
tl.store(out_ptr1 + x0, tmp13, xmask)
tl.store(out_ptr2 + x0, tmp11, xmask)
tl.store(out_ptr3 + x0, tmp15, xmask)
tl.store(out_ptr4 + x0, tmp21, xmask)
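# Note (added commentary): with the sequence length fixed at 4, the recurrence
# is fully unrolled -- tmp11, tmp13, tmp15, tmp21 are h_1..h_4 -- so all four
# timesteps come out of this one elementwise kernel.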
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 64)
buf1 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
buf3 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 128)
buf4 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 192)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_stack_0[grid(64)](arg0_1, arg1_1,
buf2, buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class CPUForgetMultNew(torch.nn.Module):
def __init__(self):
super(CPUForgetMultNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dido1998/cruxeval | CPUForgetMult | false | 3,417 | [
"BSD-3-Clause"
] | 0 | 229f7562c3f5e0da6432728e1c42402f51473a84 | https://github.com/dido1998/cruxeval/tree/229f7562c3f5e0da6432728e1c42402f51473a84 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, f, x, hidden_init=None):
result = []
forgets = f.split(1, dim=0)
prev_h = hidden_init
for i, h in enumerate((f * x).split(1, dim=0)):
if prev_h is not None:
h = h + (1 - forgets[i]) * prev_h
h = h.view(h.size()[1:])
result.append(h)
prev_h = h
return torch.stack(result)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SurnameClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [intermediate_vector], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# intermediate_vector => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [intermediate_vector], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [intermediate_vector, dropout], Original ATen: [aten.relu, aten.native_dropout]
buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [prediction_vector], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_5
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Linear
from torch.nn.functional import softmax
from torch.nn.functional import relu
from torch.nn.functional import dropout
class SurnameClassifier(Module):
    def __init__(self, input_dim: 'int', hidden_dim: 'int',
            output_dim: 'int') -> None:
super().__init__()
self.fc1 = Linear(input_dim, hidden_dim)
self.fc2 = Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_activator: 'bool'=False):
intermediate_vector = relu(self.fc1(x_in))
prediction_vector = self.fc2(dropout(intermediate_vector, p=0.5))
if apply_activator:
prediction_vector = softmax(prediction_vector, dim=1)
return prediction_vector
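# A minimal usage sketch (added for illustration, not from the source repo).
# Note that torch.nn.functional.dropout defaults to training=True, so the
# p=0.5 dropout above stays active even after clf.eval() unless the call is
# changed to dropout(..., training=self.training).
if __name__ == "__main__":
    clf = SurnameClassifier(input_dim=4, hidden_dim=4, output_dim=4)
    print(clf(torch.rand(4, 4, 4, 4), apply_activator=True).shape)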
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
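# What the fused kernel above computes, sketched in eager terms
# (illustrative, not part of the generated module):
#   out  = relu(x + bias)   # written back in place through in_out_ptr0
#   mask = out <= 0         # stored via out_ptr0 for the backward pass
# i.e. roughly: out = torch.relu(x + bias); mask = out <= 0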
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_5
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_4, buf6
class SurnameClassifierNew(Module):
def __init__(self, input_dim: 'int', hidden_dim: 'int', output_dim: 'int'
) ->None:
super().__init__()
self.fc1 = Linear(input_dim, hidden_dim)
self.fc2 = Linear(hidden_dim, output_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
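# Note (illustrative observation, not part of the original record): the
# compiled call() above invokes aten.native_dropout with train=True
# unconditionally, matching F.dropout's default training=True in the eager
# forward, so dropout stays active even under module.eval().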
| dbradf/nlp-pytorch | SurnameClassifier | false | 3,418 | [
"Apache-2.0"
] | 0 | 957e3c5a1edf1f2ae9a8e281729395bed886bc87 | https://github.com/dbradf/nlp-pytorch/tree/957e3c5a1edf1f2ae9a8e281729395bed886bc87 | from torch.nn import Module
import torch
from torch.nn import Linear
from torch.nn.functional import softmax
from torch.nn.functional import relu
from torch.nn.functional import dropout
class Model(Module):
def __init__(self, input_dim: 'int', hidden_dim: 'int', output_dim: 'int'
) ->None:
super().__init__()
self.fc1 = Linear(input_dim, hidden_dim)
self.fc2 = Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_activator: 'bool'=False):
intermediate_vector = relu(self.fc1(x_in))
prediction_vector = self.fc2(dropout(intermediate_vector, p=0.5))
if apply_activator:
prediction_vector = softmax(prediction_vector, dim=1)
return prediction_vector
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
LeafClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class LeafClassifier(nn.Module):
def __init__(self, feature_size, hidden_size):
super(LeafClassifier, self).__init__()
self.mlp1 = nn.Linear(feature_size, hidden_size)
self.mlp2 = nn.Linear(hidden_size, 1)
def forward(self, input_feature):
output = torch.relu(self.mlp1(input_feature))
output = self.mlp2(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feature_size': 4, 'hidden_size': 4}]
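# A minimal usage sketch (illustrative, not part of the original record):
# the classifier maps per-node features to a single logit per position.
def _example_usage():
    clf = LeafClassifier(feature_size=4, hidden_size=4)
    logits = clf(torch.rand([4, 4, 4, 4]))
    return logits.shape  # torch.Size([4, 4, 4, 1])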
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4
class LeafClassifierNew(nn.Module):
def __init__(self, feature_size, hidden_size):
super(LeafClassifierNew, self).__init__()
self.mlp1 = nn.Linear(feature_size, hidden_size)
self.mlp2 = nn.Linear(hidden_size, 1)
def forward(self, input_0):
primals_1 = self.mlp1.weight
primals_2 = self.mlp1.bias
primals_4 = self.mlp2.weight
primals_5 = self.mlp2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| dips4717/ui-hier-net | LeafClassifier | false | 3,419 | [
"MIT"
] | 0 | 7c93168b6150ea00e15638504cf561eda98de5c6 | https://github.com/dips4717/ui-hier-net/tree/7c93168b6150ea00e15638504cf561eda98de5c6 | import torch
import torch.utils.data
from torch import nn
class Model(nn.Module):
def __init__(self, feature_size, hidden_size):
super().__init__()
self.mlp1 = nn.Linear(feature_size, hidden_size)
self.mlp2 = nn.Linear(hidden_size, 1)
def forward(self, input_feature):
output = torch.relu(self.mlp1(input_feature))
output = self.mlp2(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConvolutionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bs/cbstxeghddltznr7shuzsnth6ngv6mnftr2w7pqzzm5flm72plbl.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xd/cxdkgesb6qc4hbcvbeerj4gwfolzo6uujsc7nr7gmmdljjywjmd6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2f/c2fbrnmwutoz3lg6i3ej5xgc7434bcb7tt67cl3pv4byjybrp5cg.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.glu]
# Source node to ATen node mapping:
# x_2 => glu
# Graph fragment:
# %glu : [num_users=2] = call_function[target=torch.ops.aten.glu.default](args = (%convolution, 1), kwargs = {})
triton_poi_fused_glu_2 = async_compile.triton('triton_poi_fused_glu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (32*x1)), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/br/cbrspj634eg2bbs7vljhmrm74hrxshk4iwow2n6gl43bcvltbocl.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%glu, %primals_4, %primals_5, [1], [0], [1], False, [0], 4), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rz/crznhq7nfh5rcx6pvqbf3sjg2iedanxrp6n6uqq3gdf2c4dkwsjb.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_5 => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_4 = async_compile.triton('triton_poi_fused_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ac/caco4bolpuottiggpde2pfpdegtdihvqokvccpojwffpok2ar6b6.py
# Topologically Sorted Source Nodes: [x_5, sigmoid, x_7, x_8], Original ATen: [aten.native_layer_norm, aten.sigmoid, aten.mul, aten.convolution]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# x_5 => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean
# x_7 => mul_2
# x_8 => convolution_2
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%permute_2,), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %sigmoid), kwargs = {})
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_2, %primals_8, %primals_9, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5 = async_compile.triton('triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = (yindex // 4)
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp8 * tmp9
tl.store(out_ptr1 + (y0 + (4*x2) + (16*y1)), tmp10, xmask & ymask)
tl.store(out_ptr2 + (x2 + (4*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4), (32, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 128, grid=grid(128), stream=stream0)
del primals_3
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.glu]
triton_poi_fused_glu_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf5, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_4.run(buf5, buf6, buf7, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, sigmoid, x_7, x_8], Original ATen: [aten.native_layer_norm, aten.sigmoid, aten.mul, aten.convolution]
triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5.run(buf5, buf6, buf7, primals_6, primals_7, buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del buf6
del buf7
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4), (16, 4, 1))
del buf10
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf12, primals_9, 64, grid=grid(64), stream=stream0)
del primals_9
return (reinterpret_tensor(buf12, (4, 4, 4), (1, 16, 4), 0), primals_2, primals_4, primals_6, primals_7, primals_8, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), buf2, buf3, buf5, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class Swish(torch.nn.Module):
    """Construct a Swish object."""
    def forward(self, x: 'Tensor') ->Tensor:
        """Return the Swish activation of the input."""
return x * torch.sigmoid(x)
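# Note (illustrative, not part of the original record): Swish as defined
# here, x * sigmoid(x), is the same function PyTorch exposes as
# torch.nn.functional.silu, e.g.
#   torch.allclose(Swish()(x), torch.nn.functional.silu(x))  # True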
class ConvolutionModule(nn.Module):
"""ConvolutionModule in Conformer model.
Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py
Args:
channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
bias (bool): Whether to use bias in conv layers (default=True).
"""
    def __init__(self, channels: 'int', kernel_size: 'int', bias: 'bool'=True
        ) ->None:
        """Construct a ConvolutionModule object."""
super(ConvolutionModule, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias
=bias)
self.norm = nn.LayerNorm(channels)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
stride=1, padding=0, bias=bias)
self.activation = Swish()
def forward(self, x: 'Tensor') ->Tensor:
"""Compute convolution module.
Args:
x: Input tensor (#time, batch, channels).
Returns:
Tensor: Output tensor (#time, batch, channels).
"""
x = x.permute(1, 2, 0)
x = self.pointwise_conv1(x)
x = nn.functional.glu(x, dim=1)
x = self.depthwise_conv(x)
x = x.permute(0, 2, 1)
x = self.norm(x)
x = x.permute(0, 2, 1)
x = self.activation(x)
x = self.pointwise_conv2(x)
return x.permute(2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'kernel_size': 1}]
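# A minimal usage sketch (illustrative, not part of the original record):
# both input and output use the (#time, batch, channels) layout.
def _example_usage():
    module = ConvolutionModule(channels=4, kernel_size=1)
    y = module(torch.rand([4, 4, 4]))
    return y.shape  # torch.Size([4, 4, 4]), same layout as the input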
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import Tensor
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
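# Eager sketch of the GLU kernel above (illustrative, not part of the
# generated module): the 8-channel input is split in half along dim 1 and
# the first half is gated by the sigmoid of the second, i.e.
#   a, b = x.chunk(2, dim=1)
#   out = a * torch.sigmoid(b)   # == torch.nn.functional.glu(x, dim=1)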
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
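# Eager sketch of the statistics kernel above (illustrative): for each
# (batch, time) position it reduces over the 4 channels and emits the two
# LayerNorm statistics consumed by the next kernel, i.e.
#   mean = x.mean(dim=-1, keepdim=True)
#   rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-05)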
@triton.jit
def triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr1, out_ptr2, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp8 * tmp9
tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp10, xmask & ymask)
tl.store(out_ptr2 + (x2 + 4 * y3), tmp10, xmask & ymask)
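# Eager sketch of the fused kernel above (illustrative): it finishes the
# LayerNorm affine transform and applies Swish in one pass,
#   y = (x - mean) * rstd * weight + bias
#   out = y * torch.sigmoid(y)
# writing the result twice, in the two layouts expected by the following
# pointwise convolution and by the tensor saved for backward.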
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4), (32, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(128)](buf2, primals_3, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = buf0
del buf0
triton_poi_fused_glu_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_3[grid(64)](buf5, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_4[grid(16)](buf5, buf6, buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5[grid(
16, 4)](buf5, buf6, buf7, primals_6, primals_7, buf9, buf10, 16,
4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf6
del buf7
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4), (16, 4, 1))
del buf10
buf12 = buf11
del buf11
triton_poi_fused_convolution_3[grid(64)](buf12, primals_9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
return (reinterpret_tensor(buf12, (4, 4, 4), (1, 16, 4), 0), primals_2,
primals_4, primals_6, primals_7, primals_8, reinterpret_tensor(
primals_1, (4, 4, 4), (4, 1, 16), 0), buf2, buf3, buf5, buf9)
class Swish(torch.nn.Module):
    """Construct a Swish object."""
    def forward(self, x: 'Tensor') ->Tensor:
        """Return the Swish activation of the input."""
return x * torch.sigmoid(x)
class ConvolutionModuleNew(nn.Module):
"""ConvolutionModule in Conformer model.
Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py
Args:
channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
bias (bool): Whether to use bias in conv layers (default=True).
"""
    def __init__(self, channels: 'int', kernel_size: 'int', bias: 'bool'=True
        ) ->None:
        """Construct a ConvolutionModule object."""
super(ConvolutionModuleNew, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias
=bias)
self.norm = nn.LayerNorm(channels)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
stride=1, padding=0, bias=bias)
self.activation = Swish()
def forward(self, input_0):
primals_2 = self.pointwise_conv1.weight
primals_3 = self.pointwise_conv1.bias
primals_4 = self.depthwise_conv.weight
primals_5 = self.depthwise_conv.bias
primals_6 = self.norm.weight
primals_7 = self.norm.bias
primals_8 = self.pointwise_conv2.weight
primals_9 = self.pointwise_conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| desh2608/icefall | ConvolutionModule | false | 3,420 | [
"Apache-2.0"
] | 0 | 1603744469d167d848e074f2ea98c587153205fa | https://github.com/desh2608/icefall/tree/1603744469d167d848e074f2ea98c587153205fa | import torch
from torch import Tensor
from torch import nn
class Swish(torch.nn.Module):
    """Construct a Swish object."""
    def forward(self, x: 'Tensor') ->Tensor:
        """Return the Swish activation of the input."""
return x * torch.sigmoid(x)
class Model(nn.Module):
"""ConvolutionModule in Conformer model.
Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py
Args:
channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
bias (bool): Whether to use bias in conv layers (default=True).
"""
    def __init__(self, channels: 'int', kernel_size: 'int', bias: 'bool'=True
        ) ->None:
        """Construct a ConvolutionModule object."""
super().__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias
=bias)
self.norm = nn.LayerNorm(channels)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
stride=1, padding=0, bias=bias)
self.activation = Swish()
def forward(self, x: 'Tensor') ->Tensor:
"""Compute convolution module.
Args:
x: Input tensor (#time, batch, channels).
Returns:
Tensor: Output tensor (#time, batch, channels).
"""
x = x.permute(1, 2, 0)
x = self.pointwise_conv1(x)
x = nn.functional.glu(x, dim=1)
x = self.depthwise_conv(x)
x = x.permute(0, 2, 1)
x = self.norm(x)
x = x.permute(0, 2, 1)
x = self.activation(x)
x = self.pointwise_conv2(x)
return x.permute(2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 1]
|
LocalNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/we/cwerb6bh4wkw66tuuogvvdhshhz6u3cw5s3a4jqb23ftxyfx5pg2.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gd/cgdqnufkmolkhmahgpirghlilfdhf2tzbnkmaswr2jaio34ke2tp.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4h/c4hp7y3t3avb7y4kdpd3qny63wnyjqlzzqqpgqgesca5dwxpwjbq.py
# Topologically Sorted Source Nodes: [conv2d, x, pad_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# conv2d => convolution
# pad_1 => _unsafe_index_2, _unsafe_index_3
# x => mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x4 = (xindex // 36)
x2 = (xindex // 36) % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.01
tmp5 = tmp3 * tmp4
tmp6 = tl.where(tmp0, tmp3, tmp5)
tl.store(out_ptr0 + (x5), tmp6, xmask)
''', device_str='cuda')
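def _leaky_relu_refpad_reference(mask, conv_out, bias):
    # Reference sketch (added; not Inductor output): eager equivalent of the
    # fused kernel above -- finish the LeakyReLU with the precomputed mask,
    # then reflection-pad the result by one pixel on each side.
    pre = conv_out + bias.view(1, -1, 1, 1)
    act = torch.where(mask, pre, pre * 0.01)
    return torch.nn.functional.pad(act, (1, 1, 1, 1), mode='reflect')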
# kernel path: runs/run_shard_7/inductor_cache/tz/ctz2qoenjoyo6tyt4vbajmakep7el2x5bm5bjwz5dxrqesj5uyf2.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
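def _leaky_relu_reference(conv_out, bias, negative_slope=0.01):
    # Reference sketch (added; not Inductor output): eager equivalent of
    # triton_poi_fused_convolution_leaky_relu_3 above, returning both the
    # positive mask (saved for the backward pass) and the activation values.
    pre = conv_out + bias.view(1, -1, 1, 1)
    return pre > 0, torch.where(pre > 0, pre, pre * negative_slope)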
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_2, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 2304, grid=grid(2304), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, 4096, grid=grid(4096), stream=stream0)
buf3 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x, pad_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2.run(buf2, buf1, primals_3, buf3, 9216, grid=grid(9216), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf4, primals_5, buf5, buf6, 4096, grid=grid(4096), stream=stream0)
del buf4
del primals_5
return (buf6, primals_2, primals_4, buf0, buf2, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LocalNet(nn.Module):
def forward(self, x_in):
"""Defines a double convolution
:param x_in: input convolutional features
:returns: convolutional features
:rtype: Tensor
"""
x = self.lrelu(self.conv1(self.refpad(x_in)))
x = self.lrelu(self.conv2(self.refpad(x)))
return x
def __init__(self, in_channels=16, out_channels=64):
"""Initialisation function
:param in_channels: number of input channels
:param out_channels: number of output channels
:returns: N/A
:rtype: N/A
"""
super(LocalNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1)
self.lrelu = nn.LeakyReLU()
self.refpad = nn.ReflectionPad2d(1)
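def _localnet_shape_check():
    # Illustrative sanity check (added; not part of the source repository):
    # each reflection pad adds 2 to H and W and each valid 3x3 convolution
    # removes 2, so LocalNet preserves spatial size while mapping the
    # channels 16 -> 64.
    out = LocalNet()(torch.rand(4, 16, 4, 4))
    assert out.shape == (4, 64, 4, 4)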
def get_inputs():
return [torch.rand([4, 16, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
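    # (Comment added.) The unassigned tl.full(...) below is a leftover from
    # dropping the unused xmask assignment present in the original kernel;
    # the loads and stores in this kernel pass mask=None instead.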
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.01
tmp5 = tmp3 * tmp4
tmp6 = tl.where(tmp0, tmp3, tmp5)
tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_2, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(2304)](primals_1, buf0,
2304, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_1[grid(4096)](buf1,
primals_3, buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2[grid(9216)](
buf2, buf1, primals_3, buf3, 9216, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
buf6 = buf1
del buf1
triton_poi_fused_convolution_leaky_relu_3[grid(4096)](buf4,
primals_5, buf5, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del primals_5
return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5
class LocalNetNew(nn.Module):
def __init__(self, in_channels=16, out_channels=64):
"""Initialisation function
:param in_channels: number of input channels
:param out_channels: number of output channels
:returns: N/A
:rtype: N/A
"""
super(LocalNetNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1)
self.lrelu = nn.LeakyReLU()
self.refpad = nn.ReflectionPad2d(1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
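def _localnetnew_smoke_test():
    # Hedged smoke test (added for illustration; assumes a CUDA device, since
    # `call` pins every buffer to cuda:0): the compiled module should keep
    # the eager network's output shape for the benchmark input.
    model = LocalNetNew().cuda()
    out = model(torch.rand(4, 16, 4, 4, device='cuda'))
    assert out.shape == (4, 64, 4, 4)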
| deshwalmahesh/CURL---cpu-gpu | LocalNet | false | 3,421 | [
"BSD-3-Clause"
] | 0 | f4e87275b6cce556b9e04a188cf7ae13d810d82a | https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a | import torch
import torch.nn as nn
class Model(nn.Module):
def forward(self, x_in):
"""Defines a double convolution
:param x_in: input convolutional features
:returns: convolutional features
:rtype: Tensor
"""
x = self.lrelu(self.conv1(self.refpad(x_in)))
x = self.lrelu(self.conv2(self.refpad(x)))
return x
def __init__(self, in_channels=16, out_channels=64):
"""Initialisation function
:param in_channels: number of input channels
:param out_channels: number of output channels
:returns: N/A
:rtype: N/A
"""
super().__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1)
self.lrelu = nn.LeakyReLU()
self.refpad = nn.ReflectionPad2d(1)
def get_inputs():
return [torch.rand([4, 16, 4, 4])]
def get_init_inputs():
return []
|
DIoU_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/63/c636kmgmo34vlk5u2vfnvkukvng7zzniiqxd6lmll4llqk3nm56k.py
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, ones, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, IoU, sub_13, sub_11, pow_1, sub_12, pow_2, dis, C_width, C_height, C, pow_3, R_DIoU, loss, lt_1, zeros_1, loss_1, sum_1], Original ATen: [aten.max, aten.min, aten.ones_like, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.pow, aten.sum]
# Source node to ATen node mapping:
# Area_o => mul
# Area_t => mul_1
# C => mul_3
# C_height => sub_10
# C_width => sub_9
# Inter => mul_2
# Inter_1 => where
# Inter_t => sub_7
# Inter_w => sub_5
# IoU => div_8
# R_DIoU => div_9
# Union => sub_8
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# dis => add_7
# loss => add_8
# loss_1 => where_1
# lt => lt
# lt_1 => lt_1
# max_1 => max_1
# max_2 => max_2
# min_1 => min_1
# min_2 => min_2
# ones => full_default_1
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# sub_11 => sub_11
# sub_12 => sub_12
# sub_13 => sub_13
# sub_4 => sub_4
# sub_6 => sub_6
# sum_1 => sum_1
# zeros => full_default
# zeros_1 => full_default_2
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_1, 1), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 1), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_3, 1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %select_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %sub_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %getitem_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %sub_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %sub_7), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%mul_2, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %mul_2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %where), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where, %sub_8), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default_1, %div_8), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_20, %select_22), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_11, 2), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_21, %select_23), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_12, 2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %getitem_2), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %getitem_6), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %sub_10), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {})
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_7, %pow_3), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_13, %div_9), kwargs = {})
# %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_8, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_1, %full_default_2, %add_8), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0 = async_compile.triton('triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4) % 4
r0 = rindex % 4
r2 = (rindex // 16)
r4 = rindex
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + (64*r2)), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + (64*r2)), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + (64*r2)), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + (64*r2)), None)
tmp119 = tl.load(in_ptr0 + (r3 + (64*r2)), None)
tmp120 = tl.load(in_ptr1 + (r3 + (64*r2)), None)
tmp123 = tl.load(in_ptr0 + (16 + r3 + (64*r2)), None)
tmp124 = tl.load(in_ptr1 + (16 + r3 + (64*r2)), None)
tmp0 = r1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1, 1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp23 = tmp22 >= tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(32 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp32 = tmp22 < tmp13
tmp33 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + (tl.broadcast_to(32 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*((-4) + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + (tl.broadcast_to(16 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + (tl.broadcast_to(48 + r0 + (4*(4 + r1)) + (64*r2), [XBLOCK, RBLOCK])), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + (tl.broadcast_to(16 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + (tl.broadcast_to(48 + r0 + (4*r1) + (64*r2), [XBLOCK, RBLOCK])), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp121 = tmp119 - tmp120
tmp122 = tmp121 * tmp121
tmp125 = tmp123 - tmp124
tmp126 = tmp125 * tmp125
tmp127 = tmp122 + tmp126
tmp128 = tmp101 * tmp106
tmp129 = tmp128 * tmp128
tmp130 = tmp127 / tmp129
tmp131 = tmp118 + tmp130
tmp132 = tmp131 < tmp109
tmp133 = tl.where(tmp132, tmp109, tmp131)
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = tl.sum(tmp134, 1)[:, None]
tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp136, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1, min_1, max_2, min_2, ones, Area_o, Area_t, add_6, add_4, sub_4, Inter_w, add_5, sub_6, Inter_t, Inter, lt, zeros, Inter_1, Union, IoU, sub_13, sub_11, pow_1, sub_12, pow_2, dis, C_width, C_height, C, pow_3, R_DIoU, loss, lt_1, zeros_1, loss_1, sum_1], Original ATen: [aten.max, aten.min, aten.ones_like, aten.mul, aten.add, aten.sub, aten.lt, aten.zeros_like, aten.where, aten.div, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0.run(arg0_1, arg1_1, buf6, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
def Interction_Union(outputs, targets):
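    """Intersection and union areas for centre-format (x, y, w, h) boxes,
    plus the extremes of the smallest enclosing box. (Docstring added for
    clarity; the function name keeps the repository's original spelling.)"""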
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
x_o, y_o, x_t, y_t = Center_points(outputs, targets)
dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2))
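        # Note (added): the DIoU paper normalises by the squared *diagonal*
        # of the enclosing box, C_w**2 + C_h**2; this implementation divides
        # by (C_w * C_h)**2, the squared enclosing-box area, instead.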
R_DIoU = torch.div(dis, torch.pow(C, 2))
ones = torch.ones_like(IoU)
loss = torch.add(ones.sub(IoU), R_DIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
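def _diou_identity_check():
    # Worked example (added; not from the source repository): for two
    # identical boxes IoU = 1 and the centre distance is 0, so each per-box
    # loss is 1 - 1 + 0 = 0 and the clamped sum stays exactly 0.
    box = torch.tensor([[0.5, 0.5, 1.0, 1.0]])
    assert torch.allclose(DIoU_loss()(box, box), torch.zeros(()))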
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0(
in_ptr0, in_ptr1, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r2 = rindex // 16
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None)
tmp119 = tl.load(in_ptr0 + (r3 + 64 * r2), None)
tmp120 = tl.load(in_ptr1 + (r3 + 64 * r2), None)
tmp123 = tl.load(in_ptr0 + (16 + r3 + 64 * r2), None)
tmp124 = tl.load(in_ptr1 + (16 + r3 + 64 * r2), None)
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK,
RBLOCK]), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp121 = tmp119 - tmp120
tmp122 = tmp121 * tmp121
tmp125 = tmp123 - tmp124
tmp126 = tmp125 * tmp125
tmp127 = tmp122 + tmp126
tmp128 = tmp101 * tmp106
tmp129 = tmp128 * tmp128
tmp130 = tmp127 / tmp129
tmp131 = tmp118 + tmp130
tmp132 = tmp131 < tmp109
tmp133 = tl.where(tmp132, tmp109, tmp131)
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = tl.sum(tmp134, 1)[:, None]
tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp136, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0[
grid(1)](arg0_1, arg1_1, buf6, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf6,
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_lossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
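def _diou_new_smoke_test():
    # Hedged smoke test (added; assumes a CUDA device): the fused kernel is
    # specialised to the benchmark shape (4, 4, 4, 4) and returns a scalar.
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    assert DIoU_lossNew()(a, b).dim() == 0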
| debrouchovea/ReproduceGoturn | DIoU_loss | false | 3,422 | [
"MIT"
] | 0 | d60f13c781ca612cacc17536530bbee989bdfa45 | https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45 | import torch
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
x_o, y_o, x_t, y_t = Center_points(outputs, targets)
dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2))
R_DIoU = torch.div(dis, torch.pow(C, 2))
ones = torch.ones_like(IoU)
loss = torch.add(ones.sub(IoU), R_DIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dw/cdwyx742hy7mnf755bi5wotiz44x7xxsxrkqosaq6ntl5d5ccmx7.py
# Topologically Sorted Source Nodes: [diag_s, add, cost_s, cost_s_1, sum_1, diag_im, add_1, cost_im, cost_im_1, sum_2, add_2], Original ATen: [aten.diag_embed, aten.add, aten.clamp, aten.sub, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# cost_im => clamp_min_1
# cost_im_1 => sub_3
# cost_s => clamp_min
# cost_s_1 => sub_2
# diag_im => eq_1, full_default_1, iota_2, where_1
# diag_s => eq, full_default, iota, where
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %mm), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute_1, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %where), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_2, %unsqueeze_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, %mm), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %permute_2, %full_default_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %where_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_clamp_diag_embed_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_diag_embed_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_diag_embed_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_diag_embed_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (r2), None)
tmp17 = tl.load(in_ptr0 + (5*r1), None, eviction_policy='evict_last')
tmp1 = 0.2
tmp2 = tmp1 - tmp0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = r0
tmp8 = r1
tmp9 = tmp7 == tmp8
tmp10 = tmp2 + tmp0
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tl.where(tmp9, tmp11, tmp5)
tmp13 = tmp6 - tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tmp1 - tmp17
tmp19 = tmp18 + tmp3
tmp20 = triton_helpers.maximum(tmp19, tmp5)
tmp21 = tmp20 - tmp12
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp16 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [diag_s, add, cost_s, cost_s_1, sum_1, diag_im, add_1, cost_im, cost_im_1, sum_2, add_2], Original ATen: [aten.diag_embed, aten.add, aten.clamp, aten.sub, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_diag_embed_sub_sum_0.run(buf3, buf0, 1, 16, grid=grid(1), stream=stream0)
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ContrastiveLoss(nn.Module):
def __init__(self, margin=0.2):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
cost_s = torch.clamp((self.margin - diag).expand_as(scores) +
scores, min=0)
cost_im = torch.clamp((self.margin - diag.view(-1, 1)).expand_as(
scores) + scores, min=0)
diag_s = torch.diag(cost_s.diag())
diag_im = torch.diag(cost_im.diag())
cost_s = cost_s - diag_s
cost_im = cost_im - diag_im
return cost_s.sum() + cost_im.sum()
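def _contrastive_identity_check():
    # Worked example (added; not from the source repository): with identity
    # feature matrices every image-caption pair scores 1.0 on the diagonal
    # and 0.0 elsewhere, so no negative pair violates the 0.2 margin and the
    # loss is exactly 0 after the diagonal terms are subtracted.
    eye = torch.eye(4)
    assert torch.allclose(ContrastiveLoss(margin=0.2)(eye, eye), torch.zeros(()))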
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_clamp_diag_embed_sub_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + r2, None)
tmp17 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
tmp1 = 0.2
tmp2 = tmp1 - tmp0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = r0
tmp8 = r1
tmp9 = tmp7 == tmp8
tmp10 = tmp2 + tmp0
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tl.where(tmp9, tmp11, tmp5)
tmp13 = tmp6 - tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tmp1 - tmp17
tmp19 = tmp18 + tmp3
tmp20 = triton_helpers.maximum(tmp19, tmp5)
tmp21 = tmp20 - tmp12
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp16 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_clamp_diag_embed_sub_sum_0[grid(1)](buf3, buf0,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf3,
class ContrastiveLossNew(nn.Module):
def __init__(self, margin=0.2):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
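def _contrastive_new_smoke_test():
    # Hedged smoke test (added; assumes a CUDA device, as `call` allocates on
    # cuda:0 and asserts 4x4 inputs): the compiled loss should be a scalar.
    imgs = torch.rand(4, 4, device='cuda')
    caps = torch.rand(4, 4, device='cuda')
    assert ContrastiveLossNew()(imgs, caps).dim() == 0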
| debayan/dsve-loc | ContrastiveLoss | false | 3,423 | [
"BSD-3-Clause-Clear"
] | 0 | 21b1e1837668b6daa0881514d0756e9bec039fcb | https://github.com/debayan/dsve-loc/tree/21b1e1837668b6daa0881514d0756e9bec039fcb | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, margin=0.2):
super().__init__()
self.margin = margin
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
cost_s = torch.clamp((self.margin - diag).expand_as(scores) +
scores, min=0)
cost_im = torch.clamp((self.margin - diag.view(-1, 1)).expand_as(
scores) + scores, min=0)
diag_s = torch.diag(cost_s.diag())
diag_im = torch.diag(cost_im.diag())
cost_s = cost_s - diag_s
cost_im = cost_im - diag_im
return cost_s.sum() + cost_im.sum()
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
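The hinge construction above can be cross-checked in plain eager mode. The sketch below is illustrative only (not part of the record); it verifies that the expand/clamp/diag-subtract sequence equals the more direct form that zeroes the diagonal hinge terms in place:

import torch

torch.manual_seed(0)
margin = 0.2
imgs, caps = torch.rand(4, 4), torch.rand(4, 4)
scores = imgs @ caps.t()
diag = scores.diag()
# Form used by the module above.
cost_s = torch.clamp((margin - diag).expand_as(scores) + scores, min=0)
cost_im = torch.clamp((margin - diag.view(-1, 1)).expand_as(scores) + scores, min=0)
loss = ((cost_s - torch.diag(cost_s.diag())).sum()
        + (cost_im - torch.diag(cost_im.diag())).sum())
# Equivalent form: zero the diagonal hinge terms directly.
alt_s = (margin - diag.unsqueeze(0) + scores).clamp(min=0).fill_diagonal_(0.0)
alt_im = (margin - diag.unsqueeze(1) + scores).clamp(min=0).fill_diagonal_(0.0)
assert torch.allclose(loss, alt_s.sum() + alt_im.sum())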
SimpleFloorModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/j6/cj6yrxveidc7xf7aw2sd5chrhx3pw3r5egsdoorepnl2ey2ejho7.py
# Topologically Sorted Source Nodes: [c, floor], Original ATen: [aten.add, aten.floor]
# Source node to ATen node mapping:
# c => add
# floor => floor
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %floor : [num_users=1] = call_function[target=torch.ops.aten.floor.default](args = (%add,), kwargs = {})
triton_poi_fused_add_floor_0 = async_compile.triton('triton_poi_fused_add_floor_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_floor_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_floor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = libdevice.floor(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c, floor], Original ATen: [aten.add, aten.floor]
stream0 = get_raw_stream(0)
triton_poi_fused_add_floor_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.jit
import torch.onnx
import torch.nn
class SimpleFloorModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return torch.floor(c)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_floor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = libdevice.floor(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_floor_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SimpleFloorModuleNew(torch.nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| andreas-hommel/glow | SimpleFloorModule | false | 3,424 | [
"Apache-2.0"
] | 0 | 2bbbf8188a2a941e85677c83f2146bbd076a262e | https://github.com/andreas-hommel/glow/tree/2bbbf8188a2a941e85677c83f2146bbd076a262e | import torch
import torch.jit
import torch.onnx
import torch.nn
class Model(torch.nn.Module):
def forward(self, a, b):
c = a + b
return torch.floor(c)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
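As a quick eager-mode illustration of what the fused add+floor kernel computes (a sketch, independent of the generated code): torch.floor rounds toward negative infinity, so negative sums do not truncate toward zero.

import torch

a = torch.tensor([0.6, -0.6, 1.5])
b = torch.tensor([0.6, -0.6, -2.0])
# floor(a + b): 1.2 -> 1, -1.2 -> -2 (not -1), -0.5 -> -1.
assert torch.equal(torch.floor(a + b), torch.tensor([1.0, -2.0, -1.0]))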
biaffine_mapping | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6e/c6ek4vaz4ftzm6ataust6sidvnoeklculi3ag7tscvwlv5jvr5k5.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.view]
# Source node to ATen node mapping:
# x => cat
# x_1 => view
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], 2), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%cat, [-1, 5]), kwargs = {})
triton_poi_fused_cat_view_0 = async_compile.triton('triton_poi_fused_cat_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (5, 4, 5), (20, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_view_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (5, 20), (20, 1), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_view_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 5), (80, 5, 1), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 1, 4), 0), buf2, reinterpret_tensor(buf0, (5, 16), (1, 5), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((5, 4, 5), (20, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class biaffine_mapping(nn.Module):
def __init__(self, input_size_x, input_size_y, output_size, bias_x,
bias_y, initializer=None):
super(biaffine_mapping, self).__init__()
self.bias_x = bias_x
self.bias_y = bias_y
self.output_size = output_size
        self.initializer = initializer
if self.bias_x:
input_size1 = input_size_x + 1
input_size2 = input_size_y + 1
self.biaffine_map = nn.Parameter(torch.Tensor(input_size1,
output_size, input_size2))
self.initialize()
def initialize(self):
        if self.initializer is None:
            torch.nn.init.orthogonal_(self.biaffine_map)
        else:
            self.initializer(self.biaffine_map)
def forward(self, x, y):
        batch_size, bucket_size = x.shape[0], x.shape[1]
        # Append a constant-one feature so the biaffine map learns bias terms.
        if self.bias_x:
            x = torch.cat([x, torch.ones([batch_size, bucket_size, 1],
                device=x.device)], axis=2)
        if self.bias_y:
            y = torch.cat([y, torch.ones([batch_size, bucket_size, 1],
                device=y.device)], axis=2)
        x_set_size, y_set_size = x.shape[-1], y.shape[-1]
        # Collapse batch and bucket dims so one matmul applies the biaffine
        # tensor; the bmm and reshapes then recover per-pair scores of shape
        # [batch, bucket, bucket, output].
        x = x.reshape(-1, x_set_size)
        biaffine_map = self.biaffine_map.reshape(x_set_size, -1)
        biaffine_mapping = torch.matmul(x, biaffine_map).reshape(batch_size,
            -1, y_set_size)
        biaffine_mapping = biaffine_mapping.bmm(torch.transpose(y, 1, 2)
            ).reshape(batch_size, bucket_size, self.output_size, bucket_size)
        biaffine_mapping = biaffine_mapping.transpose(2, 3)
        return biaffine_mapping
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_x': 4, 'input_size_y': 4, 'output_size': 4,
'bias_x': 4, 'bias_y': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (5, 4, 5), (20, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_view_0[grid(80)](primals_1, buf0, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (5, 20), (20,
1), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_view_0[grid(80)](primals_2, buf2, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 5), (80, 5, 1),
0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 1, 4), 0
), buf2, reinterpret_tensor(buf0, (5, 16), (1, 5), 0)
class biaffine_mappingNew(nn.Module):
def __init__(self, input_size_x, input_size_y, output_size, bias_x,
bias_y, initializer=None):
super(biaffine_mappingNew, self).__init__()
self.bias_x = bias_x
self.bias_y = bias_y
self.output_size = output_size
        self.initializer = initializer
if self.bias_x:
input_size1 = input_size_x + 1
input_size2 = input_size_y + 1
self.biaffine_map = nn.Parameter(torch.Tensor(input_size1,
output_size, input_size2))
self.initialize()
def initialize(self):
        if self.initializer is None:
            torch.nn.init.orthogonal_(self.biaffine_map)
        else:
            self.initializer(self.biaffine_map)
def forward(self, input_0, input_1):
primals_3 = self.biaffine_map
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| ciaochiaociao/CLNER | biaffine_mapping | false | 3,425 | [
"MIT"
] | 0 | a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
class Model(nn.Module):
def __init__(self, input_size_x, input_size_y, output_size, bias_x,
bias_y, initializer=None):
super().__init__()
self.bias_x = bias_x
self.bias_y = bias_y
self.output_size = output_size
        self.initializer = initializer
if self.bias_x:
input_size1 = input_size_x + 1
input_size2 = input_size_y + 1
self.biaffine_map = nn.Parameter(torch.Tensor(input_size1,
output_size, input_size2))
self.initialize()
def initialize(self):
        if self.initializer is None:
            torch.nn.init.orthogonal_(self.biaffine_map)
        else:
            self.initializer(self.biaffine_map)
def forward(self, x, y):
batch_size, bucket_size = x.shape[0], x.shape[1]
if self.bias_x:
x = torch.cat([x, torch.ones([batch_size, bucket_size, 1],
device=x.device)], axis=2)
if self.bias_y:
y = torch.cat([y, torch.ones([batch_size, bucket_size, 1],
device=y.device)], axis=2)
x_set_size, y_set_size = x.shape[-1], y.shape[-1]
x = x.reshape(-1, x_set_size)
biaffine_map = self.biaffine_map.reshape(x_set_size, -1)
biaffine_mapping = torch.matmul(x, biaffine_map).reshape(batch_size,
-1, y_set_size)
biaffine_mapping = biaffine_mapping.bmm(torch.transpose(y, 1, 2)
).reshape(batch_size, bucket_size, self.output_size, bucket_size)
biaffine_mapping = biaffine_mapping.transpose(2, 3)
return biaffine_mapping
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_x': 4, 'input_size_y': 4, 'output_size': 4,
'bias_x': 4, 'bias_y': 4}]
|
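The reshape/matmul/bmm chain in the forward above is just a batched bilinear form. The sketch below is illustrative (shapes mirror get_inputs; U, xb, yb are local names, not from the record) and checks the chain against a direct einsum over the biaffine tensor:

import torch

b, t, d, o = 4, 4, 4, 4
x, y = torch.rand(b, t, d), torch.rand(b, t, d)
U = torch.rand(d + 1, o, d + 1)
ones = torch.ones(b, t, 1)
xb = torch.cat([x, ones], dim=2)  # bias feature appended, [b, t, d+1]
yb = torch.cat([y, ones], dim=2)
direct = torch.einsum('bxi,ioj,byj->bxyo', xb, U, yb)  # [b, t, t, o]
chained = xb.reshape(-1, d + 1).matmul(U.reshape(d + 1, -1))
chained = chained.reshape(b, -1, d + 1).bmm(yb.transpose(1, 2))
chained = chained.reshape(b, t, o, t).transpose(2, 3)
assert torch.allclose(direct, chained, atol=1e-5)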
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/v2/cv2ha4mes2yy34yfp3rs7w4jsqgoyz7khozf7gmpk6lmpcslkegp.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, 1.0), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# k => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf3, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf8, primals_8, 64, grid=grid(64), stream=stream0)
del primals_8
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf11, primals_10, 64, grid=grid(64), stream=stream0)
del primals_10
return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
# Inlined stand-in for commons.convert_pad_shape (presumably imported from the
# repo's commons module in the original source, but undefined here): flatten a
# per-dim [[before, after], ...] spec, outermost dim first, into the reversed
# flat list that F.pad expects.
def convert_pad_shape(pad_shape):
    layer = pad_shape[::-1]
    return [item for sublist in layer for item in sublist]
class MultiHeadAttention(nn.Module):
def __init__(self, channels, out_channels, n_heads, p_dropout=0.0,
window_size=None, heads_share=True, block_length=None,
proximal_bias=False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
        # Split channels into heads: [b, channels, t] -> [b, n_heads, t, k_channels].
        b, d, t_s, t_t = *key.size(), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.
transpose(-2, -1))
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(
self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(
rel_logits)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
assert t_s == t_t, 'Local attention is only available for self-attention.'
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores.masked_fill(block_mask == 0, -10000.0)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
        # The relative-embedding table spans 2 * self.window_size + 1 positions.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
            padded_relative_embeddings = F.pad(relative_embeddings,
                convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
from torch.nn import functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Inlined stand-in for commons.convert_pad_shape (presumably imported from the
# repo's commons module in the original source, but undefined here): flatten a
# per-dim [[before, after], ...] spec, outermost dim first, into the reversed
# flat list that F.pad expects.
def convert_pad_shape(pad_shape):
    layer = pad_shape[::-1]
    return [item for sublist in layer for item in sublist]
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_div_0[grid(64)](buf3, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf4 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = buf2
del buf2
triton_poi_fused_convolution_1[grid(64)](buf8, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4,
4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_1[grid(64)](buf11, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf11, buf7, primals_1, primals_3, primals_4, primals_6,
primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16,
4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class MultiHeadAttentionNew(nn.Module):
def __init__(self, channels, out_channels, n_heads, p_dropout=0.0,
window_size=None, heads_share=True, block_length=None,
proximal_bias=False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = *key.size(), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.
transpose(-2, -1))
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(
self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(
rel_logits)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
assert t_s == t_t, 'Local attention is only available for self-attention.'
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores.masked_fill(block_mask == 0, -10000.0)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
        # The relative-embedding table spans 2 * self.window_size + 1 positions.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
            padded_relative_embeddings = F.pad(relative_embeddings,
                convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def forward(self, input_0, input_1):
primals_1 = self.conv_q.weight
primals_2 = self.conv_q.bias
primals_4 = self.conv_k.weight
primals_5 = self.conv_k.bias
primals_7 = self.conv_v.weight
primals_8 = self.conv_v.bias
primals_9 = self.conv_o.weight
primals_10 = self.conv_o.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| dimitrijejankov/vits | MultiHeadAttention | false | 3,426 | [
"MIT"
] | 0 | d2f6385c7946c2355433804796b541ffae0a3d9f | https://github.com/dimitrijejankov/vits/tree/d2f6385c7946c2355433804796b541ffae0a3d9f | import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
class Model(nn.Module):
def __init__(self, channels, out_channels, n_heads, p_dropout=0.0,
window_size=None, heads_share=True, block_length=None,
proximal_bias=False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = *key.size(), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.
transpose(-2, -1))
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(
self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(
rel_logits)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
assert t_s == t_t, 'Local attention is only available for self-attention.'
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores.masked_fill(block_mask == 0, -10000.0)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
# ... truncated (>4000 chars) for memory efficiency |
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/d2/cd26tq7ga52mdxogiuulvwbmbjkpkq2yqyx64cyua3djeh4ftabe.py
# Topologically Sorted Source Nodes: [out, out_1, out_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten._prelu_kernel]
# Source node to ATen node mapping:
# out => convolution_1
# out_1 => add, rsqrt, var_mean
# out_2 => gt, mul_1, where
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {})
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (0))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp30 = tmp29 * tmp25
tmp31 = tl.where(tmp27, tmp25, tmp30)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp31, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ew/ceweeshye2lagr2qv2x4t34ozfsqhch3grjz26z4lj3a2xk667zu.py
# Topologically Sorted Source Nodes: [shortcuts, out_3, out_4, out_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten._prelu_kernel]
# Source node to ATen node mapping:
# out_3 => convolution_2
# out_4 => add_1, rsqrt_1, var_mean_1
# out_6 => gt_1, mul_3, where_1
# shortcuts => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_7, %primals_8, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_3, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_6), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_6, %mul_3), kwargs = {})
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_out_ptr2 + (r2 + (16*x3)), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (0))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp26 = tmp24 + tmp25
tmp27 = tmp2 - tmp12
tmp28 = tmp27 * tmp23
tmp29 = tmp28 + tmp26
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp34 = tmp33 * tmp29
tmp35 = tl.where(tmp31, tmp29, tmp34)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(in_out_ptr2 + (r2 + (16*x3)), tmp26, xmask)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp35, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [shortcuts], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf7 = reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf5 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, out_1, out_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten._prelu_kernel]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0.run(buf3, buf7, primals_5, primals_6, buf4, buf8, 16, 16, grid=grid(16), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = buf9; del buf9 # reuse
buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf14 = reinterpret_tensor(buf12, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf12 # reuse
buf1 = buf0; del buf0 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [shortcuts, out_3, out_4, out_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten._prelu_kernel]
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1.run(buf10, buf14, buf1, primals_8, primals_2, primals_6, buf11, buf15, 16, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_8
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_7, buf1, buf3, buf4, buf7, buf8, buf10, buf11, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ResBlock(nn.Module):
def __init__(self, in_chans, out_chans, drop_prob, same='False'):
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=1)
self.conv1 = nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1)
self.in1 = nn.InstanceNorm2d(out_chans)
self.relu = nn.PReLU()
self.conv2 = nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1)
self.in2 = nn.InstanceNorm2d(out_chans)
self.same = same
def forward(self, input):
shortcuts = self.conv(input)
if self.same == 'True':
shortcuts = input
out = self.conv1(input)
out = self.in1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.in2(out)
out += shortcuts
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'out_chans': 4, 'drop_prob': 4}]
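# A minimal CPU smoke test (a sketch, not part of the source repo) that
# wires the two harness functions above into the module and checks that
# the residual block preserves the input shape:
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    model = ResBlock(*init_args, **init_kwargs)
    out = model(*get_inputs())
    assert out.shape == (4, 4, 4, 4)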
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + 0)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp30 = tmp29 * tmp25
tmp31 = tl.where(tmp27, tmp25, tmp30)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp31, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
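# The kernel above fuses three graph nodes into one persistent reduction:
# the conv bias add (tmp2), the per-(batch, channel) instance-norm mean and
# rsqrt over the 16 spatial positions (tmp12, tmp23), and the PReLU gating
# (tmp31), so normalized activations never round-trip through global memory.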
@triton.jit
def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1(
in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_out_ptr2 + (r2 + 16 * x3), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + 0)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp26 = tmp24 + tmp25
tmp27 = tmp2 - tmp12
tmp28 = tmp27 * tmp23
tmp29 = tmp28 + tmp26
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp34 = tmp33 * tmp29
tmp35 = tl.where(tmp31, tmp29, tmp34)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(in_out_ptr2 + (r2 + 16 * x3), tmp26, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp35, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf7 = reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf5
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0[
grid(16)](buf3, buf7, primals_5, primals_6, buf4, buf8, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf14 = reinterpret_tensor(buf12, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf12
buf1 = buf0
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1[
grid(16)](buf10, buf14, buf1, primals_8, primals_2, primals_6,
buf11, buf15, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_2
del primals_8
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_7,
buf1, buf3, buf4, buf7, buf8, buf10, buf11, buf14)
class ResBlockNew(nn.Module):
def __init__(self, in_chans, out_chans, drop_prob, same='False'):
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=1)
self.conv1 = nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1)
self.in1 = nn.InstanceNorm2d(out_chans)
self.relu = nn.PReLU()
self.conv2 = nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1)
self.in2 = nn.InstanceNorm2d(out_chans)
self.same = same
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_6 = self.relu.weight
primals_7 = self.conv2.weight
primals_8 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| divelab/mri | ResBlock | false | 3,427 | ["MIT"] | 0 | e181b446acfc6f9ac3f42657f710dd583e77d1aa | https://github.com/divelab/mri/tree/e181b446acfc6f9ac3f42657f710dd583e77d1aa | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_chans, out_chans, drop_prob, same='False'):
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=1)
self.conv1 = nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1)
self.in1 = nn.InstanceNorm2d(out_chans)
self.relu = nn.PReLU()
self.conv2 = nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1)
self.in2 = nn.InstanceNorm2d(out_chans)
self.same = same
def forward(self, input):
shortcuts = self.conv(input)
if self.same == 'True':
shortcuts = input
out = self.conv1(input)
out = self.in1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.in2(out)
out += shortcuts
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
MiniBatchDiscrimination | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/u5/cu5dkuy7wjkpuhlunr4rsoicbkn7l4bdm4dsnk5ifjkfldz6plxs.py
# Topologically Sorted Source Nodes: [sub, abs_1, norm, neg, expnorm, sum_2], Original ATen: [aten.sub, aten.abs, aten.sum, aten.neg, aten.exp]
# Source node to ATen node mapping:
# abs_1 => abs_1
# expnorm => exp
# neg => neg
# norm => sum_1
# sub => sub
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %permute), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [3]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0]), kwargs = {})
triton_poi_fused_abs_exp_neg_sub_sum_0 = async_compile.triton('triton_poi_fused_abs_exp_neg_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_exp_neg_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_exp_neg_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (16 + (4*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (17 + (4*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (18 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (19 + (4*x0)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (32 + (4*x0)), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (33 + (4*x0)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (34 + (4*x0)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr0 + (35 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr0 + (48 + (4*x0)), xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr0 + (49 + (4*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr0 + (50 + (4*x0)), xmask, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr0 + (51 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp0 - tmp21
tmp23 = tl_math.abs(tmp22)
tmp25 = tmp4 - tmp24
tmp26 = tl_math.abs(tmp25)
tmp27 = tmp23 + tmp26
tmp29 = tmp9 - tmp28
tmp30 = tl_math.abs(tmp29)
tmp31 = tmp27 + tmp30
tmp33 = tmp14 - tmp32
tmp34 = tl_math.abs(tmp33)
tmp35 = tmp31 + tmp34
tmp36 = -tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp20 + tmp37
tmp40 = tmp0 - tmp39
tmp41 = tl_math.abs(tmp40)
tmp43 = tmp4 - tmp42
tmp44 = tl_math.abs(tmp43)
tmp45 = tmp41 + tmp44
tmp47 = tmp9 - tmp46
tmp48 = tl_math.abs(tmp47)
tmp49 = tmp45 + tmp48
tmp51 = tmp14 - tmp50
tmp52 = tl_math.abs(tmp51)
tmp53 = tmp49 + tmp52
tmp54 = -tmp53
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp38 + tmp55
tmp58 = tmp0 - tmp57
tmp59 = tl_math.abs(tmp58)
tmp61 = tmp4 - tmp60
tmp62 = tl_math.abs(tmp61)
tmp63 = tmp59 + tmp62
tmp65 = tmp9 - tmp64
tmp66 = tl_math.abs(tmp65)
tmp67 = tmp63 + tmp66
tmp69 = tmp14 - tmp68
tmp70 = tl_math.abs(tmp69)
tmp71 = tmp67 + tmp70
tmp72 = -tmp71
tmp73 = tl_math.exp(tmp72)
tmp74 = tmp56 + tmp73
tl.store(out_ptr0 + (x2), tmp74, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ow/cowo7zsxbfhomz4ksno5fgpcv6pedpwuunbiu6n5udvebqdjydvk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %sub_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = 1.0
tmp11 = tmp9 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matrices], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16), (16, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, abs_1, norm, neg, expnorm, sum_2], Original ATen: [aten.sub, aten.abs, aten.sum, aten.neg, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_exp_neg_sub_sum_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_1, buf1, buf2, 32, grid=grid(32), stream=stream0)
del buf1
return (buf2, buf0, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import init
class MiniBatchDiscrimination(nn.Module):
"""
source: https://gist.github.com/t-ae/732f78671643de97bbe2c46519972491
paper: Salimans et al. 2016. Improved Techniques for Training GANs
"""
def __init__(self, in_features, out_features, kernel_dims, mean=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.kernel_dims = kernel_dims
self.mean = mean
self.T = nn.Parameter(torch.Tensor(in_features, out_features,
kernel_dims), requires_grad=True)
init.normal_(self.T, 0, 1)
def forward(self, x):
matrices = x.mm(self.T.view(self.in_features, -1))
matrices = matrices.view(-1, self.out_features, self.kernel_dims)
M = matrices.unsqueeze(0)
M_T = M.permute(1, 0, 2, 3)
norm = torch.abs(M - M_T).sum(3)
expnorm = torch.exp(-norm)
o_b = expnorm.sum(0) - 1
if self.mean:
o_b /= x.size(0) - 1
x = torch.cat([x, o_b], 1)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'kernel_dims': 4}]
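# A minimal sketch (not part of the source) of the closed form this layer
# computes: with B identical rows every pairwise L1 distance is zero, so
# each appended similarity feature is sum_j exp(0) - 1 = B - 1.
if __name__ == '__main__':
    layer = MiniBatchDiscrimination(in_features=4, out_features=4,
        kernel_dims=4)
    x = torch.ones(3, 4)
    out = layer(x)
    assert out.shape == (3, 8)
    assert torch.allclose(out[:, 4:], torch.full((3, 4), 2.0))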
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_abs_exp_neg_sub_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr0 + (16 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (17 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (18 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (19 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr0 + (32 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr0 + (33 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp46 = tl.load(in_ptr0 + (34 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr0 + (35 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp57 = tl.load(in_ptr0 + (48 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp60 = tl.load(in_ptr0 + (49 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp64 = tl.load(in_ptr0 + (50 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp68 = tl.load(in_ptr0 + (51 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp0 - tmp21
tmp23 = tl_math.abs(tmp22)
tmp25 = tmp4 - tmp24
tmp26 = tl_math.abs(tmp25)
tmp27 = tmp23 + tmp26
tmp29 = tmp9 - tmp28
tmp30 = tl_math.abs(tmp29)
tmp31 = tmp27 + tmp30
tmp33 = tmp14 - tmp32
tmp34 = tl_math.abs(tmp33)
tmp35 = tmp31 + tmp34
tmp36 = -tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp20 + tmp37
tmp40 = tmp0 - tmp39
tmp41 = tl_math.abs(tmp40)
tmp43 = tmp4 - tmp42
tmp44 = tl_math.abs(tmp43)
tmp45 = tmp41 + tmp44
tmp47 = tmp9 - tmp46
tmp48 = tl_math.abs(tmp47)
tmp49 = tmp45 + tmp48
tmp51 = tmp14 - tmp50
tmp52 = tl_math.abs(tmp51)
tmp53 = tmp49 + tmp52
tmp54 = -tmp53
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp38 + tmp55
tmp58 = tmp0 - tmp57
tmp59 = tl_math.abs(tmp58)
tmp61 = tmp4 - tmp60
tmp62 = tl_math.abs(tmp61)
tmp63 = tmp59 + tmp62
tmp65 = tmp9 - tmp64
tmp66 = tl_math.abs(tmp65)
tmp67 = tmp63 + tmp66
tmp69 = tmp14 - tmp68
tmp70 = tl_math.abs(tmp69)
tmp71 = tmp67 + tmp70
tmp72 = -tmp71
tmp73 = tl_math.exp(tmp72)
tmp74 = tmp56 + tmp73
tl.store(out_ptr0 + x2, tmp74, xmask)
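# The kernel above fully unrolls the B = 4 pairwise L1 distances per
# (row, feature) cell, so each program computes sum_j exp(-|M_i - M_j|_1)
# for its cell directly, with no reduction loop.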
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = 1.0
tmp11 = tmp9 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16),
(16, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_exp_neg_sub_sum_0[grid(16)](buf0, buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](primals_1, buf1, buf2, 32, XBLOCK=
32, num_warps=1, num_stages=1)
del buf1
return buf2, buf0, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class MiniBatchDiscriminationNew(nn.Module):
"""
source: https://gist.github.com/t-ae/732f78671643de97bbe2c46519972491
paper: Salimans et al. 2016. Improved Techniques for Training GANs
"""
def __init__(self, in_features, out_features, kernel_dims, mean=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.kernel_dims = kernel_dims
self.mean = mean
self.T = nn.Parameter(torch.Tensor(in_features, out_features,
kernel_dims), requires_grad=True)
init.normal_(self.T, 0, 1)
def forward(self, input_0):
primals_2 = self.T
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| danielnflam/GAN-Tests | MiniBatchDiscrimination | false | 3,428 | ["BSD-3-Clause"] | 0 | f112e27b802d717f64a8f2cfa79b9898667da14c | https://github.com/danielnflam/GAN-Tests/tree/f112e27b802d717f64a8f2cfa79b9898667da14c | import torch
import torch.nn as nn
from torch.nn import init
class Model(nn.Module):
"""
source: https://gist.github.com/t-ae/732f78671643de97bbe2c46519972491
paper: Salimans et al. 2016. Improved Techniques for Training GANs
"""
def __init__(self, in_features, out_features, kernel_dims, mean=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.kernel_dims = kernel_dims
self.mean = mean
self.T = nn.Parameter(torch.Tensor(in_features, out_features,
kernel_dims), requires_grad=True)
init.normal_(self.T, 0, 1)
def forward(self, x):
matrices = x.mm(self.T.view(self.in_features, -1))
matrices = matrices.view(-1, self.out_features, self.kernel_dims)
M = matrices.unsqueeze(0)
M_T = M.permute(1, 0, 2, 3)
norm = torch.abs(M - M_T).sum(3)
expnorm = torch.exp(-norm)
o_b = expnorm.sum(0) - 1
if self.mean:
o_b /= x.size(0) - 1
x = torch.cat([x, o_b], 1)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
BetaMish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sk/cskdgdpcta6rni3mxihk5bjk7gwkzh6uemvucu2whaadq6wrok76.py
# Topologically Sorted Source Nodes: [exp, add, pow_1, log, tanh, mul], Original ATen: [aten.exp, aten.add, aten.pow, aten.log, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# add => add
# exp => exp
# log => log
# mul => mul
# pow_1 => pow_1
# tanh => tanh
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 1.5), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%pow_1,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%log,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %tanh), kwargs = {})
triton_poi_fused_add_exp_log_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_exp_log_mul_pow_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_log_mul_pow_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_log_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 1.5
tmp5 = libdevice.pow(tmp3, tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp0 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, add, pow_1, log, tanh, mul], Original ATen: [aten.exp, aten.add, aten.pow, aten.log, aten.tanh, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_log_mul_pow_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BetaMish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
beta = 1.5
return x * torch.tanh(torch.log(torch.pow(1 + torch.exp(x), beta)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
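# A minimal sketch (not from the source repo): since log((1 + e^x)**beta)
# equals beta * softplus(x), the same activation has a numerically stabler
# form, checked here on moderate inputs.
if __name__ == '__main__':
    import torch.nn.functional as F
    x = torch.linspace(-5, 5, steps=11)
    ref = BetaMish()(x)
    stable = x * torch.tanh(1.5 * F.softplus(x))
    assert torch.allclose(ref, stable, atol=1e-5)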
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_log_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 1.5
tmp5 = libdevice.pow(tmp3, tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp0 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_log_mul_pow_tanh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class BetaMishNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| dcrmg/Efficient-Segmentation-Networks | BetaMish | false | 3,429 | ["MIT"] | 0 | e2f2d90d69e4e9af464678b0f02bc754c28f643d | https://github.com/dcrmg/Efficient-Segmentation-Networks/tree/e2f2d90d69e4e9af464678b0f02bc754c28f643d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
beta = 1.5
return x * torch.tanh(torch.log(torch.pow(1 + torch.exp(x), beta)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BasicMotionEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7k/c7klnf45hofe2rzqy3twad4emot2skmrgdktcktkddqzesb47nny.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 144
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 36
y1 = (yindex // 36)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (36*x2) + (147456*y1)), tmp0, ymask)
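# Layout shuffle only: reads the NCHW correlation tensor (36 channels per
# batch) and writes it back channels-last, so the downstream conv kernels
# see contiguous channels.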
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cg/ccgjbjra22jzcgrej4cregu4qnsuv4valsbpzazlnyx6d4zadmny.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6c/c6cncuvxxhqnmxps5zcsamewoq74gve7wfzt5tsiyaefaumvnizq.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = (yindex // 2)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (2*x2) + (98*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/64/c64ftfrtg7wzrrafqmf23jsfazdeeslgcihlzd3ir6gzqnsqouzt.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = (yindex // 2)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (2*x2) + (8192*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zz/czzry3mptvotkg2o63ggvxe23r6aakp7p6x6j4z6chr5g2jg64yo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16128
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7c/c7caq6stn5xhqphn2xnmwbpvxspyfj5wahntqw4tlpltw5xu6ktg.py
# Topologically Sorted Source Nodes: [conv2d, cor], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# cor => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
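# Note (added, not Inductor output): the kernel above is the standard fused
# conv epilogue -- the extern convolution writes the bias-free result, and
# this in-place pointwise pass adds the per-channel bias (x0 is the channel
# index in the channels-last layout) and applies ReLU. A minimal eager-mode
# equivalent, assuming `y` is the conv output and `b` its bias tensor:
#     y = torch.relu_(y + b.view(1, -1, 1, 1))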
# kernel path: runs/run_shard_7/inductor_cache/cg/ccgem2d3minyz4cphcb5m5gr254xubtv62ypjgbd2mjaj7ew6ps5.py
# Topologically Sorted Source Nodes: [cor_flo], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cor_flo => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_3], 1), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-64) + x0), tmp12, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, None)
''', device_str='cuda')
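# Note (added): this cat kernel evaluates both concat branches under channel
# masks (x0 < 64 reads `cor`, x0 >= 64 reads `flo`) and fuses each branch's
# pending bias-add + ReLU into the copy, so
# torch.cat([relu(conv(...)), relu(conv(...))], dim=1) costs a single
# memory pass instead of three.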
# kernel path: runs/run_shard_7/inductor_cache/6h/c6hm7sfjm5znh6zaavrhemfdbagmwselk6wdf455zjm4m6hri6gr.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_4, %primals_8], 1), kwargs = {})
triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 128
x0 = xindex % 4096
x2 = (xindex // 524288)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 126, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((126*x0) + (516096*x2) + x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((2*x0) + (8192*x2) + ((-126) + x1)), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x3), tmp16, None)
''', device_str='cuda')
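# Note (added): besides concatenating the 126 conv channels with the 2 flow
# channels, this kernel converts layout -- both inputs are channels-last
# (hence the `126*x0 + 516096*x2 + x1` and `2*x0 + 8192*x2 + ...` indexing,
# with x0 = pixel and x2 = batch), while the output buf14 is written as
# contiguous NCHW, matching the module's return value.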
# kernel path: runs/run_shard_7/inductor_cache/oq/coqahmle73rziabtyvejxffgqwjt4fpmsrxv2jzdeffn5sl7uomz.py
# Topologically Sorted Source Nodes: [conv2d_4, out], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# out => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_11, %primals_12, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2064384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 126
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/25/c25ulitk3enogdxsfm25azyl53hdfygxtei67fnidtwtwfmjifcq.py
# Topologically Sorted Source Nodes: [conv2d_3, flo_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# flo_1 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_9, %primals_10, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
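# Note (added): despite the name, the two *_threshold_backward kernels above
# run in the forward pass -- they recompute conv-output + bias + ReLU and
# store only the boolean mask (activation <= 0) that autograd later uses to
# zero gradients flowing back through the ReLUs.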
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (64, 36, 1, 1), (36, 1, 1, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 36, 64, 64), (147456, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_9, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_10, (64, ), (1, ))
assert_size_stride(primals_11, (126, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_12, (126, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 36, 64, 64), (147456, 1, 2304, 36), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 144, 4096, grid=grid(144, 4096), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((64, 2, 7, 7), (98, 1, 14, 2), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf2, 128, 49, grid=grid(128, 49), stream=stream0)
del primals_6
buf3 = empty_strided_cuda((4, 2, 64, 64), (8192, 1, 128, 2), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_8, buf3, 8, 4096, grid=grid(8, 4096), stream=stream0)
del primals_8
buf4 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_9, buf4, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_9
buf5 = empty_strided_cuda((126, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_11, buf5, 16128, 9, grid=grid(16128, 9), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d, cor], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf7, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 1, 4096, 64))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf3, buf2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, flo], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf10, primals_7, 1048576, grid=grid(1048576), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf12 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32)
# Topologically Sorted Source Nodes: [cor_flo], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf8, primals_5, buf11, primals_10, buf12, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 126, 64, 64), (516096, 1, 8064, 126))
buf14 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(buf13, primals_12, buf3, buf14, 2097152, grid=grid(2097152), stream=stream0)
buf15 = empty_strided_cuda((4, 126, 64, 64), (516096, 1, 8064, 126), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, out], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf13, primals_12, buf15, 2064384, grid=grid(2064384), stream=stream0)
del buf13
del primals_12
buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, flo_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf11, primals_10, buf16, 1048576, grid=grid(1048576), stream=stream0)
del buf11
del primals_10
buf17 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, cor_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf8, primals_5, buf17, 1048576, grid=grid(1048576), stream=stream0)
del buf8
del primals_5
return (buf14, primals_1, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf10, buf12, buf15, buf16, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 36, 1, 1), (36, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 36, 64, 64), (147456, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 2, 64, 64), (8192, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((126, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((126, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
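# Hedged usage note (added): executing this generated module directly
# benchmarks call() with random strided inputs on cuda:0 via
# compiled_module_main/print_performance (times=10, repeat=10 by default).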
| from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
class BasicMotionEncoder(nn.Module):
def __init__(self, args):
super(BasicMotionEncoder, self).__init__()
self.args = args
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)
self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0)
self.convc2 = nn.Conv2d(64, 64, 3, padding=1)
self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
self.convf2 = nn.Conv2d(64, 64, 3, padding=1)
self.conv = nn.Conv2d(64 + 64, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
def get_inputs():
return [torch.rand([4, 2, 64, 64]), torch.rand([4, 36, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config(corr_levels=4, corr_radius=4)}]
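# Hedged usage sketch (added, not part of the original row): exercises the
# eager module with the harness shapes above; cor_planes = 4*(2*4+1) = 36,
# matching the (4, 36, 64, 64) correlation input.
if __name__ == "__main__":
    enc = BasicMotionEncoder(_mock_config(corr_levels=4, corr_radius=4))
    flow, corr = get_inputs()
    out = enc(flow, corr)
    assert out.shape == (4, 128, 64, 64)  # 126 conv channels + 2 flow channels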
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 144
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 36
y1 = yindex // 36
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 36 * x2 + 147456 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = yindex // 2
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 2 * x2 + 98 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = yindex // 2
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 2 * x2 + 8192 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16128
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-64 + x0), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, None)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 128
x0 = xindex % 4096
x2 = xindex // 524288
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 126, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (126 * x0 + 516096 * x2 + x1), tmp4,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp15 = tl.load(in_ptr2 + (2 * x0 + 8192 * x2 + (-126 + x1)), tmp12,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 126
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (64, 36, 1, 1), (36, 1, 1, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 36, 64, 64), (147456, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_9, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_10, (64,), (1,))
assert_size_stride(primals_11, (126, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_12, (126,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 36, 64, 64), (147456, 1, 2304, 36),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(144, 4096)](primals_3, buf0, 144, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_1[grid(4096, 9)](primals_4, buf1, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((64, 2, 7, 7), (98, 1, 14, 2), torch.float32)
triton_poi_fused_2[grid(128, 49)](primals_6, buf2, 128, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((4, 2, 64, 64), (8192, 1, 128, 2), torch.
float32)
triton_poi_fused_3[grid(8, 4096)](primals_8, buf3, 8, 4096, XBLOCK=
128, YBLOCK=8, num_warps=4, num_stages=1)
del primals_8
buf4 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_1[grid(4096, 9)](primals_9, buf4, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_9
buf5 = empty_strided_cuda((126, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16128, 9)](primals_11, buf5, 16128, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_11
buf6 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_5[grid(1048576)](buf7, primals_2,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf8 = extern_kernels.convolution(buf7, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf9 = extern_kernels.convolution(buf3, buf2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_5[grid(1048576)](buf10, primals_7,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf12 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.float32)
triton_poi_fused_cat_6[grid(2097152)](buf8, primals_5, buf11,
primals_10, buf12, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf12, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 126, 64, 64), (516096, 1, 8064, 126))
buf14 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_7[grid(2097152)](buf13, primals_12, buf3,
buf14, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
buf15 = empty_strided_cuda((4, 126, 64, 64), (516096, 1, 8064, 126),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(2064384)](
buf13, primals_12, buf15, 2064384, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_12
buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(1048576)](
buf11, primals_10, buf16, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf11
del primals_10
buf17 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(1048576)](
buf8, primals_5, buf17, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf8
del primals_5
return (buf14, primals_1, buf0, buf1, buf2, buf3, buf4, buf5, buf7,
buf10, buf12, buf15, buf16, buf17)
class BasicMotionEncoderNew(nn.Module):
def __init__(self, args):
super(BasicMotionEncoderNew, self).__init__()
self.args = args
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)
self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0)
self.convc2 = nn.Conv2d(64, 64, 3, padding=1)
self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
self.convf2 = nn.Conv2d(64, 64, 3, padding=1)
self.conv = nn.Conv2d(64 + 64, 128 - 2, 3, padding=1)
def forward(self, input_0, input_1):
primals_1 = self.convc1.weight
primals_2 = self.convc1.bias
primals_4 = self.convc2.weight
primals_5 = self.convc2.bias
primals_6 = self.convf1.weight
primals_7 = self.convf1.bias
primals_9 = self.convf2.weight
primals_10 = self.convf2.bias
primals_11 = self.conv.weight
primals_12 = self.conv.bias
primals_8 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
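# Hedged equivalence note (added): BasicMotionEncoderNew routes forward()
# through the compiled call() above; given the same state_dict and CUDA
# inputs it is expected to match the eager BasicMotionEncoder from the
# source repo, e.g. (sketch, assumes both classes are importable):
#     cfg = _mock_config(corr_levels=4, corr_radius=4)
#     ref, new = BasicMotionEncoder(cfg).cuda(), BasicMotionEncoderNew(cfg).cuda()
#     new.load_state_dict(ref.state_dict())
#     flow = torch.rand(4, 2, 64, 64, device='cuda')
#     corr = torch.rand(4, 36, 64, 64, device='cuda')
#     torch.testing.assert_close(new(flow, corr), ref(flow, corr))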
 | BrianPugh/RAFT-Stereo | BasicMotionEncoder | false | 3430 | [
"MIT"
] | 0 | 494dd79545411eee56e32540bfd6f45a16c74a19 | https://github.com/BrianPugh/RAFT-Stereo/tree/494dd79545411eee56e32540bfd6f45a16c74a19 | from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)
self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0)
self.convc2 = nn.Conv2d(64, 64, 3, padding=1)
self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
self.convf2 = nn.Conv2d(64, 64, 3, padding=1)
self.conv = nn.Conv2d(64 + 64, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
def get_inputs():
return [torch.rand([4, 2, 64, 64]), torch.rand([4, 36, 64, 64])]
def get_init_inputs():
    return [[], {'args': _mock_config(corr_levels=4, corr_radius=4)}]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ca/ccawxivjls6nn6fmdbkmz5aqdguiri4khvzuwx6zynye6gpiaja5.py
# Topologically Sorted Source Nodes: [conv1d_1, output_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# output_1 => relu
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/h2/ch27hkeznyxjvkqd6cqrzfsuls4s54ertvhjwcfmksi4fv53c6gm.py
# Topologically Sorted Source Nodes: [conv1d_3, output_3], Original ATen: [aten.convolution, aten.sum]
# Source node to ATen node mapping:
# conv1d_3 => convolution_3
# output_3 => sum_1
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_8, %primals_9, [1], [0], [1], False, [0], 1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convolution_3, [2]), kwargs = {})
triton_poi_fused_convolution_sum_2 = async_compile.triton('triton_poi_fused_convolution_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp4 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp7 + tmp2
tmp9 = tmp6 + tmp8
tmp11 = tmp10 + tmp2
tmp12 = tmp9 + tmp11
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
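# Note (added): with the sequence length fixed at 4, Inductor unrolls the
# sum over dim=2 into four scalar loads per row; the scalar bias of the
# final 1-channel conv (in_ptr1) is broadcast and added to each element
# before the reduction, fusing the conv bias-add and the sum into one pass.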
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (20, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (20, 20, 1), (20, 1, 1))
assert_size_stride(primals_7, (20, ), (1, ))
assert_size_stride(primals_8, (1, 20, 1), (20, 1, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 4), (80, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d_1, output_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 320, grid=grid(320), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 4), (80, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv1d_2, output_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf5, primals_7, 320, grid=grid(320), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4), (4, 4, 1))
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_3, output_3], Original ATen: [aten.convolution, aten.sum]
triton_poi_fused_convolution_sum_2.run(buf6, primals_9, buf7, 4, grid=grid(4), stream=stream0)
del buf6
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, 20, 1), (20, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 20, 1), (20, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super(Encoder, self).__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output
class Critic(nn.Module):
"""Estimates the problem complexity.
This is a basic module that just looks at the log-probabilities predicted by
    the encoder + decoder, and returns an estimate of complexity.
"""
def __init__(self, dynamic_size, hidden_size):
super(Critic, self).__init__()
self.dynamic_encoder = Encoder(dynamic_size, hidden_size)
self.fc1 = nn.Conv1d(hidden_size, 20, kernel_size=1)
self.fc2 = nn.Conv1d(20, 20, kernel_size=1)
self.fc3 = nn.Conv1d(20, 1, kernel_size=1)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
def forward(self, dynamic):
dynamic_hidden = self.dynamic_encoder(dynamic)
output = F.relu(self.fc1(dynamic_hidden))
output = F.relu(self.fc2(output))
output = self.fc3(output).sum(dim=2)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dynamic_size': 4, 'hidden_size': 4}]
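# Minimal usage sketch (illustrative only; the helper name is hypothetical,
# shapes follow get_inputs()/get_init_inputs() above):
def _critic_demo():
    model = Critic(dynamic_size=4, hidden_size=4)
    return model(torch.rand(4, 4, 4))  # shape: (4, 1)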
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp4 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp7 + tmp2
tmp9 = tmp6 + tmp8
tmp11 = tmp10 + tmp2
tmp12 = tmp9 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (20, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (20, 20, 1), (20, 1, 1))
assert_size_stride(primals_7, (20,), (1,))
assert_size_stride(primals_8, (1, 20, 1), (20, 1, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 4), (80, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(320)](buf3, primals_5, 320,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 4), (80, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_1[grid(320)](buf5, primals_7, 320,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4), (4, 4, 1))
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused_convolution_sum_2[grid(4)](buf6, primals_9, buf7,
4, XBLOCK=4, num_warps=1, num_stages=1)
del buf6
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super(Encoder, self).__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output
class CriticNew(nn.Module):
"""Estimates the problem complexity.
This is a basic module that just looks at the log-probabilities predicted by
    the encoder + decoder, and returns an estimate of complexity.
"""
def __init__(self, dynamic_size, hidden_size):
super(CriticNew, self).__init__()
self.dynamic_encoder = Encoder(dynamic_size, hidden_size)
self.fc1 = nn.Conv1d(hidden_size, 20, kernel_size=1)
self.fc2 = nn.Conv1d(20, 20, kernel_size=1)
self.fc3 = nn.Conv1d(20, 1, kernel_size=1)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
def forward(self, input_0):
primals_1 = self.dynamic_encoder.conv.weight
primals_2 = self.dynamic_encoder.conv.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
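# Note: forward() above feeds the live nn.Conv1d weights and biases straight
# into the compiled call() graph, so CriticNew behaves as a drop-in
# replacement for the eager Critic module (output[0] is the summed fc3
# activation of shape (4, 1)).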
| dimichai/City-Metro-Network-Expansion-with-RL | Critic | false | 3,431 | [
"MIT"
] | 0 | 54cfec74d89b4e4fc912d480a3025e4c75e3b196 | https://github.com/dimichai/City-Metro-Network-Expansion-with-RL/tree/54cfec74d89b4e4fc912d480a3025e4c75e3b196 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super().__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output
class Model(nn.Module):
"""Estimates the problem complexity.
This is a basic module that just looks at the log-probabilities predicted by
    the encoder + decoder, and returns an estimate of complexity.
"""
def __init__(self, dynamic_size, hidden_size):
super().__init__()
self.dynamic_encoder = Encoder(dynamic_size, hidden_size)
self.fc1 = nn.Conv1d(hidden_size, 20, kernel_size=1)
self.fc2 = nn.Conv1d(20, 20, kernel_size=1)
self.fc3 = nn.Conv1d(20, 1, kernel_size=1)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
def forward(self, dynamic):
dynamic_hidden = self.dynamic_encoder(dynamic)
output = F.relu(self.fc1(dynamic_hidden))
output = F.relu(self.fc2(output))
output = self.fc3(output).sum(dim=2)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ActNorm2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/mk/cmkfq3mg2sl66skmhudljmlrjckeersi3ye3frqm2bl7felzsn2n.py
# Topologically Sorted Source Nodes: [sum_1, mul, logdet], Original ATen: [aten.sum, aten.mul]
# Source node to ATen node mapping:
# logdet => mul_1
# mul => mul
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%unsqueeze_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 4), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ok/cok2tne7527mk7p2rdtta5caje65jdloa27vp4xewaco2m24lfgp.py
# Topologically Sorted Source Nodes: [exp, mul_2, add], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul_2 => mul_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%unsqueeze_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_5), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
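# Descriptive note: the elementwise kernel above evaluates
# x * exp(log_scale) + shift, with log_scale and shift broadcast per channel
# (index x1 selects the channel), matching the forward pass of the eager
# module below.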
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_1, mul, logdet], Original ATen: [aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(buf2, primals_1, 1, 4, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul_2, add], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_1.run(primals_2, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, buf2, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class ActNorm2D(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm2D, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :, None, None]
def shift(self):
return self._shift[None, :, None, None]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
                mean = torch.transpose(x, 0, 1).contiguous().view(
                    self.num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :, None, None]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum() * x.size(2) * x.size(3)
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
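# Note on the fused logdet: for the (4, 4, 4, 4) input above, the Triton
# reduction computes sum(log_scale) * 4.0 * 4.0, which is exactly
# log_scale.sum() * x.size(2) * x.size(3) from this forward pass.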
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
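# Descriptive note: the reduction above sums the 4 per-channel log-scales and
# multiplies by 4.0 twice, folding in the spatial H and W factors as constants
# for the fixed (4, 4, 4, 4) input shape.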
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(1)](buf2, primals_1, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1,
primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, buf2, primals_1, primals_2
class ActNorm2DNew(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm2DNew, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :, None, None]
def shift(self):
return self._shift[None, :, None, None]
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def forward(self, input_0):
primals_1 = self._log_scale
primals_3 = self._shift
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| david-klindt/invertible-resnet | ActNorm2D | false | 3,432 | [
"MIT"
] | 0 | ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89 | import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :, None, None]
def shift(self):
return self._shift[None, :, None, None]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
                mean = torch.transpose(x, 0, 1).contiguous().view(
                    self.num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :, None, None]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum() * x.size(2) * x.size(3)
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
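# Hedged round-trip sketch (illustrative; the helper name is hypothetical and
# the first forward call initialises _shift/_log_scale):
def _actnorm_roundtrip_demo():
    m = Model(num_channels=4)
    x = torch.rand(4, 4, 4, 4)
    y, _logdet = m(x)
    return torch.allclose(m.inverse(y), x, atol=1e-05)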
|
CombineContext | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
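# Descriptive note: the mask/where pattern above implements torch.cat along
# dim=1 for two (4, 4) inputs; columns 0-3 of each output row are read from
# in_ptr0 and columns 4-7 from in_ptr1.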
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class CombineContext(nn.Module):
def __init__(self, num_features, num_context_features):
super(CombineContext, self).__init__()
self.linear = nn.Linear(num_features + num_context_features,
num_features)
def forward(self, token, prev_context_vector):
x = torch.cat((token, prev_context_vector), 1)
return self.linear(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_features': 4, 'num_context_features': 4}]
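# Minimal usage sketch (illustrative only; the helper name is hypothetical):
def _combine_context_demo():
    model = CombineContext(num_features=4, num_context_features=4)
    token, context = torch.rand(4, 4), torch.rand(4, 4)
    return model(token, context)  # cat -> (4, 8), linear -> (4, 4)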
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
return buf1, buf0
class CombineContextNew(nn.Module):
def __init__(self, num_features, num_context_features):
super(CombineContextNew, self).__init__()
self.linear = nn.Linear(num_features + num_context_features,
num_features)
def forward(self, input_0, input_1):
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| dmcinerney/Summarization | CombineContext | false | 3,433 | [
"Apache-2.0"
] | 0 | 4d30900757308f7981a6544b4d6890f15133f269 | https://github.com/dmcinerney/Summarization/tree/4d30900757308f7981a6544b4d6890f15133f269 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, num_features, num_context_features):
super().__init__()
self.linear = nn.Linear(num_features + num_context_features,
num_features)
def forward(self, token, prev_context_vector):
x = torch.cat((token, prev_context_vector), 1)
return self.linear(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
FC_ELU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# out_1 => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class FC_ELU(nn.Module):
def __init__(self, in_dim, hidden_units):
super(FC_ELU, self).__init__()
self.fc = nn.Linear(in_dim, hidden_units)
self.elu = nn.ELU()
def forward(self, x):
out = self.fc(x)
out = self.elu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'hidden_units': 4}]
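# ELU reference (alpha = 1.0): elu(x) = x for x > 0, else expm1(x); the fused
# Triton kernel above mirrors this branch with tl.where. Hedged sanity check
# (helper name is hypothetical):
def _fc_elu_demo():
    model = FC_ELU(in_dim=4, hidden_units=4)
    y = model(torch.rand(4, 4, 4, 4))
    assert torch.all(y > -1.0)  # ELU with alpha=1 is bounded below by -1
    return y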
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2,
            reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class FC_ELUNew(nn.Module):
def __init__(self, in_dim, hidden_units):
super(FC_ELUNew, self).__init__()
self.fc = nn.Linear(in_dim, hidden_units)
self.elu = nn.ELU()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| donaldo3/Neural-voice-cloning | FC_ELU | false | 3,434 | [
"MIT"
] | 0 | a67cb8d34f5674e2c613d131f18182ad56d8f32f | https://github.com/donaldo3/Neural-voice-cloning/tree/a67cb8d34f5674e2c613d131f18182ad56d8f32f | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_dim, hidden_units):
super().__init__()
self.fc = nn.Linear(in_dim, hidden_units)
self.elu = nn.ELU()
def forward(self, x):
out = self.fc(x)
out = self.elu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Backbone | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pb/cpbkxgd5dxg55pl7wgewxipis6hd7pthrco4xeq35cuym5fsa534.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6t/c6tj5mjk4hz7kmrhrg5u2iwiezqcy7a3trlworoutxljynfwnvct.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
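# Descriptive note: the two sigmoid kernels above fuse the Linear bias add
# with torch.sigmoid in place; they differ only in the trailing feature width
# (10 for dense1, 2 for dense2).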
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 4), (4, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 10), (10, 1))
assert_size_stride(primals_5, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 640, grid=grid(640), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 2), (1, 10), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf3, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 10), (10, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class Backbone(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.latent_size = latent_size
self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size)
self.activation = torch.nn.Sigmoid()
def forward(self, x):
x = self.activation(self.dense1(x))
out = self.activation(self.dense2(x))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
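# Shape walk-through (illustrative; the helper name is hypothetical): the
# (4, 4, 4, 4) input maps through dense1 + sigmoid to (4, 4, 4, 10), then
# through dense2 + sigmoid to (4, 4, 4, 2).
def _backbone_demo():
    model = Backbone()
    return model(torch.rand(4, 4, 4, 4))  # shape: (4, 4, 4, 2)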
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 4), (4, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 10), (10, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0)
del buf0
get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(640)](buf1, primals_2, 640,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0),
reinterpret_tensor(primals_4, (10, 2), (1, 10), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf2
        triton_poi_fused_sigmoid_1[grid(128)](buf3, primals_5, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
    return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        buf1, buf3, primals_4)
class BackboneNew(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.latent_size = latent_size
self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size)
self.activation = torch.nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.dense1.weight
primals_2 = self.dense1.bias
primals_4 = self.dense2.weight
primals_5 = self.dense2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| dmoebius-dm/prototorch_models | Backbone | false | 3,435 | [
"MIT"
] | 0 | 71602bf38a09148eab13d98c9f89589b345ac570 | https://github.com/dmoebius-dm/prototorch_models/tree/71602bf38a09148eab13d98c9f89589b345ac570 | import torch
class Model(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.latent_size = latent_size
self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size)
self.activation = torch.nn.Sigmoid()
def forward(self, x):
x = self.activation(self.dense1(x))
out = self.activation(self.dense2(x))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
RankScaledGaussianPrior | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gy/cgyinjk73k3ckuqmckw7gx2ner37mp6i6muzwehjhc3olxf7yqmi.py
# Topologically Sorted Source Nodes: [order, ranks], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# order => sort
# ranks => sort_1
# Graph fragment:
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%arg0_1, 1), kwargs = {})
# %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%getitem_1, 1), kwargs = {})
triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i16', 2: '*i16', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = r2
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False)
    tmp7 = tmp6.to(tl.int64)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
    tmp9, tmp10 = triton_helpers.sort_with_index(tmp8, tmp4, None, 1, stable=False, descending=False)
tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp6, xmask)
tl.store(out_ptr1 + (x0 + (16*r2) + (64*x1)), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mm/cmmds2hgw7773oqfmguhsq3bp2r2stc3ocewznmq3gh5ofyoge7b.py
# Topologically Sorted Source Nodes: [order, ranks, neg, truediv, exp, neg_1, mul, exp_1], Original ATen: [aten.sort, aten.neg, aten.div, aten.exp, aten.mul]
# Source node to ATen node mapping:
# exp => exp
# exp_1 => exp_1
# mul => mul
# neg => neg
# neg_1 => neg_1
# order => sort
# ranks => sort_1
# truediv => div
# Graph fragment:
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%arg0_1, 1), kwargs = {})
# %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%getitem_1, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_3,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 4), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%exp,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %arg0_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
triton_poi_fused_div_exp_mul_neg_sort_1 = async_compile.triton('triton_poi_fused_div_exp_mul_neg_sort_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_mul_neg_sort_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_mul_neg_sort_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tmp0.to(tl.int64)
tmp2 = -tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = -tmp6
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
# Topologically Sorted Source Nodes: [order, ranks], Original ATen: [aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_sort_0.run(arg0_1, buf1, buf3, 64, 4, grid=grid(64), stream=stream0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [order, ranks, neg, truediv, exp, neg_1, mul, exp_1], Original ATen: [aten.sort, aten.neg, aten.div, aten.exp, aten.mul]
triton_poi_fused_div_exp_mul_neg_sort_1.run(buf3, arg0_1, buf4, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf3
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
def rank_scaled_gaussian(distances, lambd):
order = torch.argsort(distances, dim=1)
ranks = torch.argsort(order, dim=1)
return torch.exp(-torch.exp(-ranks / lambd) * distances)
class RankScaledGaussianPrior(torch.nn.Module):
def __init__(self, lambd):
super().__init__()
self.lambd = lambd
def forward(self, distances):
return rank_scaled_gaussian(distances, self.lambd)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'lambd': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = r2
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
_tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
stable=False, descending=False)
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
_tmp9, tmp10 = triton_helpers.sort_with_index(tmp8, tmp4, None, 1,
stable=False, descending=False)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp6, xmask)
tl.store(out_ptr1 + (x0 + 16 * r2 + 64 * x1), tmp10, xmask)
@triton.jit
def triton_poi_fused_div_exp_mul_neg_sort_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tmp0.to(tl.int64)
tmp2 = -tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = -tmp6
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
get_raw_stream(0)
triton_per_fused_sort_0[grid(64)](arg0_1, buf1, buf3, 64, 4, XBLOCK
=1, num_warps=2, num_stages=1)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_exp_mul_neg_sort_1[grid(256)](buf3, arg0_1,
buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf3
return buf4,
def rank_scaled_gaussian(distances, lambd):
order = torch.argsort(distances, dim=1)
ranks = torch.argsort(order, dim=1)
return torch.exp(-torch.exp(-ranks / lambd) * distances)
class RankScaledGaussianPriorNew(torch.nn.Module):
def __init__(self, lambd):
super().__init__()
self.lambd = lambd
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| dmoebius-dm/prototorch_models | RankScaledGaussianPrior | false | 3,436 | [
"MIT"
] | 0 | 71602bf38a09148eab13d98c9f89589b345ac570 | https://github.com/dmoebius-dm/prototorch_models/tree/71602bf38a09148eab13d98c9f89589b345ac570 | import torch
def rank_scaled_gaussian(distances, lambd):
order = torch.argsort(distances, dim=1)
ranks = torch.argsort(order, dim=1)
return torch.exp(-torch.exp(-ranks / lambd) * distances)
class Model(torch.nn.Module):
def __init__(self, lambd):
super().__init__()
self.lambd = lambd
def forward(self, distances):
return rank_scaled_gaussian(distances, self.lambd)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
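# Illustrative sketch (not from the repo; the helper name is an assumption):
# the double argsort in rank_scaled_gaussian turns raw distances into integer
# ranks along dim=1, which then set the per-entry Gaussian decay rate.
def _demo_ranks(lambd=4):
    d = torch.tensor([[0.9, 0.1, 0.5]])
    order = torch.argsort(d, dim=1)      # tensor([[1, 2, 0]])
    ranks = torch.argsort(order, dim=1)  # tensor([[2, 0, 1]]): 0.1 gets rank 0
    # small rank -> exp(-rank/lambd) close to 1 -> stronger suppression of d
    return torch.exp(-torch.exp(-ranks / lambd) * d)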
|
SimpleMultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# y_2 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/v4/cv4nyn2kde7dd2c53ddahw4vtxyldln6pqt62jrliqindkf3sj5m.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# y_2 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SimpleMultiheadAttention(nn.Module):
def __init__(self, d_x, d_attn, num_heads):
super(SimpleMultiheadAttention, self).__init__()
self.single_head_attn = nn.Linear(d_x, d_attn)
self.multi_head_attn = nn.Linear(d_attn, num_heads)
def forward(self, x):
y = self.single_head_attn(x)
        nn.functional.relu(y)  # note: the result is discarded, so this relu is a no-op
        y = self.multi_head_attn(y)
        y = nn.functional.softmax(y)  # implicit dim (deprecated); resolves to dim=1 here
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_x': 4, 'd_attn': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf3, primals_4
class SimpleMultiheadAttentionNew(nn.Module):
def __init__(self, d_x, d_attn, num_heads):
super(SimpleMultiheadAttentionNew, self).__init__()
self.single_head_attn = nn.Linear(d_x, d_attn)
self.multi_head_attn = nn.Linear(d_attn, num_heads)
def forward(self, input_0):
primals_1 = self.single_head_attn.weight
primals_2 = self.single_head_attn.bias
primals_4 = self.multi_head_attn.weight
primals_5 = self.multi_head_attn.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| donaldo3/Neural-voice-cloning | SimpleMultiheadAttention | false | 3,437 | [
"MIT"
] | 0 | a67cb8d34f5674e2c613d131f18182ad56d8f32f | https://github.com/donaldo3/Neural-voice-cloning/tree/a67cb8d34f5674e2c613d131f18182ad56d8f32f | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, d_x, d_attn, num_heads):
super().__init__()
self.single_head_attn = nn.Linear(d_x, d_attn)
self.multi_head_attn = nn.Linear(d_attn, num_heads)
def forward(self, x):
y = self.single_head_attn(x)
        nn.functional.relu(y)  # note: the result is discarded, so this relu is a no-op
        y = self.multi_head_attn(y)
        y = nn.functional.softmax(y)  # implicit dim (deprecated); resolves to dim=1 here
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
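# Hedged sketch (not from the repo; _demo_attention is an illustrative name):
# since the relu result above is discarded, the module reduces to two linear
# layers plus a softmax, and the implicit softmax dim resolves to dim=1 for
# this 4D input (PyTorch emits a deprecation warning for the omitted dim=).
def _demo_attention():
    model = Model(4, 4, 4)
    x, = get_inputs()
    y = model(x)
    # softmax normalizes over dim 1, so sums along that axis are ~1
    assert torch.allclose(y.sum(dim=1), torch.ones(4, 4, 4))
    return y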
|
UpSampleAndHalveChannels | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ff/cffhfyg22xbribwqqqct5kdcsfampxjsxm645cabtsik2vspgfca.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, primals_1, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 2, 8, 8), (128, 64, 8, 1))
return (buf3, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn as nn
class UpSampleAndHalveChannels(nn.Module):
"""
Doubles the spatial dimensions (H,W) but halves the number of channels.
Inverse of the DownSample function in blocks.py
From Diakogiannis et al.
doi: 10.1016/j.isprsjprs.2020.01.013
"""
def __init__(self, _in_channels, _factor=2):
super().__init__()
self.in_channels = _in_channels
self.factor = _factor
self.upSample = nn.Upsample(scale_factor=self.factor, mode=
'bilinear', align_corners=None)
self.halveChannels = nn.Conv2d(in_channels=self.in_channels,
out_channels=self.in_channels // self.factor, kernel_size=(1, 1
), stride=1, padding=0, dilation=1, bias=False)
def forward(self, x: 'Tensor') ->Tensor:
out = self.upSample(x)
out = self.halveChannels(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'_in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf2, primals_1, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_1
buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 2, 8, 8), (128, 64, 8, 1))
return buf3, primals_2, buf2
class UpSampleAndHalveChannelsNew(nn.Module):
"""
Doubles the spatial dimensions (H,W) but halves the number of channels.
Inverse of the DownSample function in blocks.py
From Diakogiannis et al.
doi: 10.1016/j.isprsjprs.2020.01.013
"""
def __init__(self, _in_channels, _factor=2):
super().__init__()
self.in_channels = _in_channels
self.factor = _factor
self.upSample = nn.Upsample(scale_factor=self.factor, mode=
'bilinear', align_corners=None)
self.halveChannels = nn.Conv2d(in_channels=self.in_channels,
out_channels=self.in_channels // self.factor, kernel_size=(1, 1
), stride=1, padding=0, dilation=1, bias=False)
def forward(self, input_0):
primals_2 = self.halveChannels.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| danielnflam/GAN-Tests | UpSampleAndHalveChannels | false | 3,438 | [
"BSD-3-Clause"
] | 0 | f112e27b802d717f64a8f2cfa79b9898667da14c | https://github.com/danielnflam/GAN-Tests/tree/f112e27b802d717f64a8f2cfa79b9898667da14c | import torch
from torch import Tensor
import torch.nn as nn
class Model(nn.Module):
"""
Doubles the spatial dimensions (H,W) but halves the number of channels.
Inverse of the DownSample function in blocks.py
From Diakogiannis et al.
doi: 10.1016/j.isprsjprs.2020.01.013
"""
def __init__(self, _in_channels, _factor=2):
super().__init__()
self.in_channels = _in_channels
self.factor = _factor
self.upSample = nn.Upsample(scale_factor=self.factor, mode=
'bilinear', align_corners=None)
self.halveChannels = nn.Conv2d(in_channels=self.in_channels,
out_channels=self.in_channels // self.factor, kernel_size=(1, 1
), stride=1, padding=0, dilation=1, bias=False)
def forward(self, x: 'Tensor') ->Tensor:
out = self.upSample(x)
out = self.halveChannels(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
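# Illustrative sketch (not from the repo; the helper name is an assumption):
# confirms the docstring's claim that H and W double (bilinear upsample by 2)
# while the channel count halves (1x1 conv from 4 to 2 channels).
def _demo_shapes():
    model = Model(4)
    x, = get_inputs()  # (N, C, H, W) = (4, 4, 4, 4)
    out = model(x)
    assert out.shape == (4, 2, 8, 8)  # channels halved, spatial dims doubled
    return out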
|
Fire | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iu/ciuhvpwv4mgl7csialwzejk66cfqkdjlfz6im45gsevf4yziv76y.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => expm1, gt, mul, mul_2, where
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fg/cfgkc25oxbeqxawkpzsa2d5pac6hvxbmzyi6lq5squxd77l6eifd.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.cat, aten.elu]
# Source node to ATen node mapping:
# out => cat
# out_1 => expm1_1, gt_1, mul_3, mul_5, where_1
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_1, %convolution_2], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%cat, 0), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, 1.0), kwargs = {})
# %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {})
triton_poi_fused_cat_elu_1 = async_compile.triton('triton_poi_fused_cat_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_elu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_elu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0)
tmp14 = tl.load(in_ptr3 + ((-4) + x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tmp19 = 0.0
tmp20 = tmp18 > tmp19
tmp21 = 1.0
tmp22 = tmp18 * tmp21
tmp23 = libdevice.expm1(tmp22)
tmp24 = tmp23 * tmp21
tmp25 = tl.where(tmp20, tmp22, tmp24)
tl.store(out_ptr0 + (x3), tmp25, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_elu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.cat, aten.elu]
triton_poi_fused_cat_elu_1.run(buf2, primals_5, buf3, primals_7, buf4, 512, grid=grid(512), stream=stream0)
del buf2
del buf3
del primals_5
del primals_7
return (buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes, expand_planes):
super(Fire, self).__init__()
self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1,
stride=1)
self.relu1 = nn.ELU(inplace=True)
self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1,
stride=1)
self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3,
stride=1, padding=1)
self.relu2 = nn.ELU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
out1 = self.conv2(x)
out2 = self.conv3(x)
out = torch.cat([out1, out2], 1)
out = self.relu2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand_planes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_cat_elu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 &
xmask, other=0.0)
tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tmp19 = 0.0
tmp20 = tmp18 > tmp19
tmp21 = 1.0
tmp22 = tmp18 * tmp21
tmp23 = libdevice.expm1(tmp22)
tmp24 = tmp23 * tmp21
tmp25 = tl.where(tmp20, tmp22, tmp24)
tl.store(out_ptr0 + x3, tmp25, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_elu_1[grid(512)](buf2, primals_5, buf3,
primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_5
del primals_7
return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf4
class FireNew(nn.Module):
def __init__(self, inplanes, squeeze_planes, expand_planes):
super(FireNew, self).__init__()
self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1,
stride=1)
self.relu1 = nn.ELU(inplace=True)
self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1,
stride=1)
self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3,
stride=1, padding=1)
self.relu2 = nn.ELU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| dcrmg/Efficient-Segmentation-Networks | Fire | false | 3,439 | [
"MIT"
] | 0 | e2f2d90d69e4e9af464678b0f02bc754c28f643d | https://github.com/dcrmg/Efficient-Segmentation-Networks/tree/e2f2d90d69e4e9af464678b0f02bc754c28f643d | import math
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, inplanes, squeeze_planes, expand_planes):
super().__init__()
self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1,
stride=1)
self.relu1 = nn.ELU(inplace=True)
self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1,
stride=1)
self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3,
stride=1, padding=1)
self.relu2 = nn.ELU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
out1 = self.conv2(x)
out2 = self.conv3(x)
out = torch.cat([out1, out2], 1)
out = self.relu2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
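# Illustrative usage sketch (not part of the repo; the helper name
# `_demo_fire` is hypothetical): concatenating the two expand branches along
# dim 1 means the output carries 2 * expand_planes channels.
def _demo_fire():
    m = Model(inplanes=4, squeeze_planes=4, expand_planes=4)
    y = m(torch.rand(2, 4, 8, 8))
    assert y.shape == (2, 8, 8, 8)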
|
GenNoise | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ga/cgaj3vplhfljncuhdgskdq43ot6mtdt3rt6lhe6vef6p6aobk6ol.py
# Topologically Sorted Source Nodes: [b], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# b => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [b], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [b, normal_], Original ATen: [aten._to_copy, aten.normal_functional]
buf1 = torch.ops.aten.normal_functional.default(buf0)
del buf0
buf2 = buf1
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GenNoise(nn.Module):
def __init__(self, dim2):
super(GenNoise, self).__init__()
self.dim2 = dim2
def forward(self, input):
a = list(input.size())
a[1] = self.dim2
b = torch.zeros(a).type_as(input.data)
b.normal_()
x = torch.autograd.Variable(b)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim2': 4}]
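# Illustrative sketch (assumption, not repo code; `_demo_gen_noise` is a
# hypothetical helper): GenNoise only reads the input's shape, replaces the
# channel dim with `dim2`, and returns standard-normal noise of that shape.
def _demo_gen_noise():
    noise = GenNoise(dim2=8)(torch.rand(2, 3, 16, 16))
    assert noise.shape == (2, 8, 16, 16)
    # sample statistics should be close to N(0, 1) up to sampling error
    assert abs(noise.mean().item()) < 0.1
    assert abs(noise.std().item() - 1.0) < 0.1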
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
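# Only zero-fills the staging buffer; the Gaussian sampling itself happens in
# call() via torch.ops.aten.normal_functional on this buffer.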
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(256)](buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = torch.ops.aten.normal_functional.default(buf0)
del buf0
buf2 = buf1
del buf1
return buf2,
class GenNoiseNew(nn.Module):
def __init__(self, dim2):
super(GenNoiseNew, self).__init__()
self.dim2 = dim2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| dustlrdk/noise2self | GenNoise | false | 3,440 | [
"MIT"
] | 0 | 46e8c4650f7ec4f664448417fecd39b4cae477f7 | https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dim2):
super().__init__()
self.dim2 = dim2
def forward(self, input):
a = list(input.size())
a[1] = self.dim2
b = torch.zeros(a).type_as(input.data)
b.normal_()
x = torch.autograd.Variable(b)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ParallelDilatedConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5k/c5ktbojc6h2jntejgkjq4yj5n4u3gwp5gjvczhk3xsjv72iazaza.py
# Topologically Sorted Source Nodes: [out1, out2, out3, out4, out1_1, out2_1, out3_1, out4_1, add, add_1, out], Original ATen: [aten.convolution, aten.elu, aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# out => add_2
# out1 => convolution
# out1_1 => expm1, gt, mul, mul_2, where
# out2 => convolution_1
# out2_1 => expm1_1, gt_1, mul_3, mul_5, where_1
# out3 => convolution_2
# out3_1 => expm1_2, gt_2, mul_6, mul_8, where_2
# out4 => convolution_3
# out4_1 => expm1_3, gt_3, mul_11, mul_9, where_3
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [2, 2], [2, 2], False, [0, 0], 1), kwargs = {})
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_6, %primals_7, [1, 1], [3, 3], [3, 3], False, [0, 0], 1), kwargs = {})
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_8, %primals_9, [1, 1], [4, 4], [4, 4], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {})
# %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {})
# %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {})
# %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %where_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %where_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %where_3), kwargs = {})
triton_poi_fused_add_convolution_elu_0 = async_compile.triton('triton_poi_fused_add_convolution_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_elu_0(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr2 + (x3), xmask)
tmp7 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_out_ptr3 + (x3), xmask)
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp2 > tmp12
tmp14 = 1.0
tmp15 = tmp2 * tmp14
tmp16 = libdevice.expm1(tmp15)
tmp17 = tmp16 * tmp14
tmp18 = tl.where(tmp13, tmp15, tmp17)
tmp19 = tmp5 > tmp12
tmp20 = tmp5 * tmp14
tmp21 = libdevice.expm1(tmp20)
tmp22 = tmp21 * tmp14
tmp23 = tl.where(tmp19, tmp20, tmp22)
tmp24 = tmp18 + tmp23
tmp25 = tmp8 > tmp12
tmp26 = tmp8 * tmp14
tmp27 = libdevice.expm1(tmp26)
tmp28 = tmp27 * tmp14
tmp29 = tl.where(tmp25, tmp26, tmp28)
tmp30 = tmp24 + tmp29
tmp31 = tmp11 > tmp12
tmp32 = tmp11 * tmp14
tmp33 = libdevice.expm1(tmp32)
tmp34 = tmp33 * tmp14
tmp35 = tl.where(tmp31, tmp32, tmp34)
tmp36 = tmp30 + tmp35
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(in_out_ptr1 + (x3), tmp5, xmask)
tl.store(in_out_ptr2 + (x3), tmp8, xmask)
tl.store(in_out_ptr3 + (x3), tmp11, xmask)
tl.store(out_ptr0 + (x3), tmp36, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(primals_3, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf3 = buf2; del buf2 # reuse
buf5 = buf4; del buf4 # reuse
buf7 = buf6; del buf6 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out1, out2, out3, out4, out1_1, out2_1, out3_1, out4_1, add, add_1, out], Original ATen: [aten.convolution, aten.elu, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_elu_0.run(buf1, buf3, buf5, buf7, primals_2, primals_5, primals_7, primals_9, buf8, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_5
del primals_7
del primals_9
return (buf8, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ParallelDilatedConv(nn.Module):
def __init__(self, inplanes, planes):
super(ParallelDilatedConv, self).__init__()
self.dilated_conv_1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=1, dilation=1)
self.dilated_conv_2 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=2, dilation=2)
self.dilated_conv_3 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=3, dilation=3)
self.dilated_conv_4 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=4, dilation=4)
self.relu1 = nn.ELU(inplace=True)
self.relu2 = nn.ELU(inplace=True)
self.relu3 = nn.ELU(inplace=True)
self.relu4 = nn.ELU(inplace=True)
def forward(self, x):
out1 = self.dilated_conv_1(x)
out2 = self.dilated_conv_2(x)
out3 = self.dilated_conv_3(x)
out4 = self.dilated_conv_4(x)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
out3 = self.relu3(out3)
out4 = self.relu4(out4)
out = out1 + out2 + out3 + out4
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
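# Minimal sketch (assumption, not repo code; `_demo_parallel_dilated` is a
# hypothetical name): with 3x3 kernels and padding equal to dilation, every
# branch preserves the spatial size, so the four ELU outputs sum elementwise.
def _demo_parallel_dilated():
    m = ParallelDilatedConv(inplanes=4, planes=4)
    y = m(torch.rand(2, 4, 16, 16))
    assert y.shape == (2, 4, 16, 16)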
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
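# One fused pass over the four dilated-conv outputs: adds each per-channel
# bias in place (in_out_ptr0..3 keep the biased pre-activations, which call()
# also returns), applies ELU (alpha = 1.0) per branch, and sums the four
# activated results into out_ptr0.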
@triton.jit
def triton_poi_fused_add_convolution_elu_0(in_out_ptr0, in_out_ptr1,
in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr2 + x3, xmask)
tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_out_ptr3 + x3, xmask)
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp2 > tmp12
tmp14 = 1.0
tmp15 = tmp2 * tmp14
tmp16 = libdevice.expm1(tmp15)
tmp17 = tmp16 * tmp14
tmp18 = tl.where(tmp13, tmp15, tmp17)
tmp19 = tmp5 > tmp12
tmp20 = tmp5 * tmp14
tmp21 = libdevice.expm1(tmp20)
tmp22 = tmp21 * tmp14
tmp23 = tl.where(tmp19, tmp20, tmp22)
tmp24 = tmp18 + tmp23
tmp25 = tmp8 > tmp12
tmp26 = tmp8 * tmp14
tmp27 = libdevice.expm1(tmp26)
tmp28 = tmp27 * tmp14
tmp29 = tl.where(tmp25, tmp26, tmp28)
tmp30 = tmp24 + tmp29
tmp31 = tmp11 > tmp12
tmp32 = tmp11 * tmp14
tmp33 = libdevice.expm1(tmp32)
tmp34 = tmp33 * tmp14
tmp35 = tl.where(tmp31, tmp32, tmp34)
tmp36 = tmp30 + tmp35
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(in_out_ptr2 + x3, tmp8, xmask)
tl.store(in_out_ptr3 + x3, tmp11, xmask)
tl.store(out_ptr0 + x3, tmp36, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1,
1), padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = extern_kernels.convolution(primals_3, primals_8, stride=(1,
1), padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf5 = buf4
del buf4
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_convolution_elu_0[grid(256)](buf1, buf3, buf5,
buf7, primals_2, primals_5, primals_7, primals_9, buf8, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_5
del primals_7
del primals_9
return (buf8, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5, buf7)
class ParallelDilatedConvNew(nn.Module):
def __init__(self, inplanes, planes):
super(ParallelDilatedConvNew, self).__init__()
self.dilated_conv_1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=1, dilation=1)
self.dilated_conv_2 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=2, dilation=2)
self.dilated_conv_3 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=3, dilation=3)
self.dilated_conv_4 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=4, dilation=4)
self.relu1 = nn.ELU(inplace=True)
self.relu2 = nn.ELU(inplace=True)
self.relu3 = nn.ELU(inplace=True)
self.relu4 = nn.ELU(inplace=True)
def forward(self, input_0):
primals_1 = self.dilated_conv_1.weight
primals_2 = self.dilated_conv_1.bias
primals_4 = self.dilated_conv_2.weight
primals_5 = self.dilated_conv_2.bias
primals_6 = self.dilated_conv_3.weight
primals_7 = self.dilated_conv_3.bias
primals_8 = self.dilated_conv_4.weight
primals_9 = self.dilated_conv_4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| dcrmg/Efficient-Segmentation-Networks | ParallelDilatedConv | false | 3,441 | [
"MIT"
] | 0 | e2f2d90d69e4e9af464678b0f02bc754c28f643d | https://github.com/dcrmg/Efficient-Segmentation-Networks/tree/e2f2d90d69e4e9af464678b0f02bc754c28f643d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, inplanes, planes):
super().__init__()
self.dilated_conv_1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=1, dilation=1)
self.dilated_conv_2 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=2, dilation=2)
self.dilated_conv_3 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=3, dilation=3)
self.dilated_conv_4 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=4, dilation=4)
self.relu1 = nn.ELU(inplace=True)
self.relu2 = nn.ELU(inplace=True)
self.relu3 = nn.ELU(inplace=True)
self.relu4 = nn.ELU(inplace=True)
def forward(self, x):
out1 = self.dilated_conv_1(x)
out2 = self.dilated_conv_2(x)
out3 = self.dilated_conv_3(x)
out4 = self.dilated_conv_4(x)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
out3 = self.relu3(out3)
out4 = self.relu4(out4)
out = out1 + out2 + out3 + out4
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Tanh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dn/cdnhr6ixjduuhci57kobqjnehjrl22mcyjqzuuhvtxxshy437diy.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Tanh(nn.Module):
"""
    https://arxiv.org/abs/1710.05941 (the Swish paper)
    The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
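# Quick numerical check (illustrative only; `_demo_tanh` is a hypothetical
# helper): torch.tanh matches the textbook definition
# (exp(x) - exp(-x)) / (exp(x) + exp(-x)).
def _demo_tanh():
    x = torch.linspace(-3, 3, 7)
    manual = (torch.exp(x) - torch.exp(-x)) / (torch.exp(x) + torch.exp(-x))
    assert torch.allclose(Tanh()(x), manual, atol=1e-6)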
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TanhNew(nn.Module):
"""
    https://arxiv.org/abs/1710.05941 (the Swish paper)
    The hype was so huge that I could not help but try it
"""
def __init__(self):
super(TanhNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| dustlrdk/noise2self | Tanh | false | 3,442 | [
"MIT"
] | 0 | 46e8c4650f7ec4f664448417fecd39b4cae477f7 | https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    https://arxiv.org/abs/1710.05941 (the Swish paper)
    The hype was so huge that I could not help but try it
"""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.tanh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
StdConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fc/cfcsjbejfvouquyzp2xodbxic63gsatkd3s7i6ajpoeoo7kagts6.py
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# sqrt => sqrt
# sub => sub
# var_mean => var_mean
# w => div
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-10), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_0 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (64*x0)), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_sqrt_sub_var_mean_0.run(buf3, primals_1, buf4, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf6, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf6, primals_1, primals_3, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
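# Sketch of the weight-standardization invariant (assumption, not repo code;
# `_demo_std_conv2d` is a hypothetical helper): after subtracting the
# per-filter mean and dividing by the biased std, each output filter has
# roughly zero mean and unit variance before the convolution runs.
def _demo_std_conv2d():
    conv = StdConv2d(4, 4, kernel_size=3, padding=1)
    v, m = torch.var_mean(conv.weight, dim=[1, 2, 3], keepdim=True,
        unbiased=False)
    w = (conv.weight - m) / torch.sqrt(v + 1e-10)
    assert torch.allclose(w.mean(dim=[1, 2, 3]), torch.zeros(4), atol=1e-6)
    assert torch.allclose(w.var(dim=[1, 2, 3], unbiased=False),
        torch.ones(4), atol=1e-4)
    assert conv(torch.rand(2, 4, 8, 8)).shape == (2, 4, 8, 8)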
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
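# Per-filter weight standardization as a persistent reduction: each of the 4
# output filters (xnumel) reduces over its 64 weights (RBLOCK) for mean and
# biased variance, stores sqrt(var + 1e-10) in in_out_ptr0, and writes the
# standardized weights (w - mean) / std to out_ptr1.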
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3,
primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, primals_1, primals_3, buf3, buf4
class StdConv2dNew(nn.Conv2d):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| dustasa/senior_software_HW | StdConv2d | false | 3,443 | [
"Apache-2.0"
] | 0 | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class Model(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
StateCritic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/db/cdbqnvlg2p7o3kynhpgntondgkltj6ifa2zhdnptkuuwfd2elkjn.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# hidden => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %convolution_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp10 & xmask, other=0.0)
tmp14 = tl.load(in_ptr3 + ((-4) + x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ca/ccawxivjls6nn6fmdbkmz5aqdguiri4khvzuwx6zynye6gpiaja5.py
# Topologically Sorted Source Nodes: [conv1d_2, output_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv1d_2 => convolution_2
# output_2 => relu
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_7, %primals_8, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ho/choqi2vfsodfa7lty2smfrb6jfeweqd4gm2ic626c3f5cyqyded3.py
# Topologically Sorted Source Nodes: [conv1d_4, output_4], Original ATen: [aten.convolution, aten.sum]
# Source node to ATen node mapping:
# conv1d_4 => convolution_4
# output_4 => sum_1
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_11, %primals_12, [1], [0], [1], False, [0], 1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convolution_4,), kwargs = {})
triton_per_fused_convolution_sum_2 = async_compile.triton('triton_per_fused_convolution_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (20, 8, 1), (8, 1, 1))
assert_size_stride(primals_8, (20, ), (1, ))
assert_size_stride(primals_9, (20, 20, 1), (20, 1, 1))
assert_size_stride(primals_10, (20, ), (1, ))
assert_size_stride(primals_11, (1, 20, 1), (20, 1, 1))
assert_size_stride(primals_12, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_2, buf1, primals_5, buf2, 128, grid=grid(128), stream=stream0)
del buf0
del buf1
del primals_2
del primals_5
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 20, 4), (80, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv1d_2, output_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf4, primals_8, 320, grid=grid(320), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 20, 4), (80, 4, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv1d_3, output_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf6, primals_10, 320, grid=grid(320), stream=stream0)
del primals_10
# Topologically Sorted Source Nodes: [conv1d_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4), (4, 4, 1))
buf8 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_4, output_4], Original ATen: [aten.convolution, aten.sum]
triton_per_fused_convolution_sum_2.run(buf7, primals_12, buf8, 1, 16, grid=grid(1), stream=stream0)
del buf7
del primals_12
return (buf8, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_11, buf2, buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, 8, 1), (8, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((20, 20, 1), (20, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 20, 1), (20, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super(Encoder, self).__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output
class StateCritic(nn.Module):
"""Estimates the problem complexity.
    Encodes the static and dynamic states with 1d convolutions, concatenates
    them, and reduces the result to a single scalar complexity estimate.
"""
def __init__(self, static_size, dynamic_size, hidden_size):
super(StateCritic, self).__init__()
self.static_encoder = Encoder(static_size, hidden_size)
self.dynamic_encoder = Encoder(dynamic_size, hidden_size)
self.fc1 = nn.Conv1d(hidden_size * 2, 20, kernel_size=1)
self.fc2 = nn.Conv1d(20, 20, kernel_size=1)
self.fc3 = nn.Conv1d(20, 1, kernel_size=1)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
def forward(self, static, dynamic):
static_hidden = self.static_encoder(static)
dynamic_hidden = self.dynamic_encoder(dynamic)
hidden = torch.cat((static_hidden, dynamic_hidden), 1)
output = F.relu(self.fc1(hidden))
output = F.relu(self.fc2(output))
output = self.fc3(output).sum()
return output
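# Illustrative usage sketch (editorial, not from the original repo; the
# helper name is hypothetical): the critic reduces a pair of
# (batch, size, seq_len) state tensors to one scalar complexity estimate.
def _example_state_critic():
    critic = StateCritic(static_size=4, dynamic_size=4, hidden_size=4)
    static, dynamic = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    return critic(static, dynamic)  # 0-dim tensor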
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'static_size': 4, 'dynamic_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
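    # Editorial note: fuses the bias add with torch.cat along the channel
    # dim of the (4, 8, 4) output. Channels 0-3 come from in_ptr0 + in_ptr1
    # (static encoder output + bias), channels 4-7 from in_ptr2 + in_ptr3
    # (dynamic encoder output + bias); x1 is the output channel index.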
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp10 & xmask,
other=0.0)
tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
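    # Editorial note: epilogue fused onto the extern conv1d. Adds the
    # per-channel bias (x1 = channel of the (4, 20, 4) buffer) and applies
    # ReLU in place on the convolution output.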
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_per_fused_convolution_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
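    # Editorial note: single-program persistent reduction. Adds the scalar
    # fc3 bias to each of the 16 elements of the (4, 1, 4) conv output and
    # sums them into the 0-dim result produced by `self.fc3(output).sum()`.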
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (20, 8, 1), (8, 1, 1))
assert_size_stride(primals_8, (20,), (1,))
assert_size_stride(primals_9, (20, 20, 1), (20, 1, 1))
assert_size_stride(primals_10, (20,), (1,))
assert_size_stride(primals_11, (1, 20, 1), (20, 1, 1))
assert_size_stride(primals_12, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf0, primals_2, buf1, primals_5,
buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_2
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 20, 4), (80, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_1[grid(320)](buf4, primals_8, 320,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 20, 4), (80, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_1[grid(320)](buf6, primals_10,
320, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4), (4, 4, 1))
buf8 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_convolution_sum_2[grid(1)](buf7, primals_12, buf8,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf7
del primals_12
return (buf8, primals_1, primals_3, primals_4, primals_6, primals_7,
primals_9, primals_11, buf2, buf4, buf6)
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super(Encoder, self).__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output
class StateCriticNew(nn.Module):
"""Estimates the problem complexity.
    Encodes the static and dynamic states with 1d convolutions, concatenates
    them, and reduces the result to a single scalar complexity estimate.
"""
def __init__(self, static_size, dynamic_size, hidden_size):
super(StateCriticNew, self).__init__()
self.static_encoder = Encoder(static_size, hidden_size)
self.dynamic_encoder = Encoder(dynamic_size, hidden_size)
self.fc1 = nn.Conv1d(hidden_size * 2, 20, kernel_size=1)
self.fc2 = nn.Conv1d(20, 20, kernel_size=1)
self.fc3 = nn.Conv1d(20, 1, kernel_size=1)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
def forward(self, input_0, input_1):
primals_1 = self.static_encoder.conv.weight
primals_2 = self.static_encoder.conv.bias
primals_4 = self.dynamic_encoder.conv.weight
primals_5 = self.dynamic_encoder.conv.bias
primals_7 = self.fc1.weight
primals_8 = self.fc1.bias
primals_9 = self.fc2.weight
primals_10 = self.fc2.bias
primals_11 = self.fc3.weight
primals_12 = self.fc3.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
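# Editorial note: call() returns the scalar estimate plus the buffers
# Inductor saves for the backward pass; the wrapper forwards only
# output[0], matching the eager module's return value.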
| dimichai/City-Metro-Network-Expansion-with-RL | StateCritic | false | 3,444 | [
"MIT"
] | 0 | 54cfec74d89b4e4fc912d480a3025e4c75e3b196 | https://github.com/dimichai/City-Metro-Network-Expansion-with-RL/tree/54cfec74d89b4e4fc912d480a3025e4c75e3b196 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super().__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output
class Model(nn.Module):
"""Estimates the problem complexity.
    Encodes the static and dynamic states with 1d convolutions, concatenates
    them, and reduces the result to a single scalar complexity estimate.
"""
def __init__(self, static_size, dynamic_size, hidden_size):
super().__init__()
self.static_encoder = Encoder(static_size, hidden_size)
self.dynamic_encoder = Encoder(dynamic_size, hidden_size)
self.fc1 = nn.Conv1d(hidden_size * 2, 20, kernel_size=1)
self.fc2 = nn.Conv1d(20, 20, kernel_size=1)
self.fc3 = nn.Conv1d(20, 1, kernel_size=1)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
def forward(self, static, dynamic):
static_hidden = self.static_encoder(static)
dynamic_hidden = self.dynamic_encoder(dynamic)
hidden = torch.cat((static_hidden, dynamic_hidden), 1)
output = F.relu(self.fc1(hidden))
output = F.relu(self.fc2(output))
output = self.fc3(output).sum()
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SIREN_layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ec/cec5zw6ptyeiiv5jnkkfagw3kvnrcqjfjvd6seibrgx6km4gsqrl.py
# Topologically Sorted Source Nodes: [x, mul, sin], Original ATen: [aten.convolution, aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# sin => sin
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 30), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_convolution_mul_sin_0 = async_compile.triton('triton_poi_fused_convolution_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sin_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mul, sin], Original ATen: [aten.convolution, aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sin_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def act(act_fun='LeakyReLU'):
"""
    Either a string naming an activation function or an nn.Module class (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
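# Illustrative examples (editorial): act('ELU0.5') parses the numeric
# suffix and returns nn.ELU(0.5, inplace=True); act('sine') returns the
# Sin module that SIREN_layer selects by default; act(nn.ReLU)
# instantiates the given module class directly.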
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
    Thin wrapper around torch.tanh so the activation can be selected by name via act('tanh').
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
    Sine activation, sin(x); the default nonlinearity used by SIREN_layer.
"""
def __init__(self):
super(Sin, self).__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layer(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super(SIREN_layer, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
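    # Editorial note: these bounds follow the SIREN initialization scheme
    # (Sitzmann et al., 2020): first layer ~ U(-1/n, 1/n), hidden layers
    # ~ U(-sqrt(6/n)/omega_0, sqrt(6/n)/omega_0) with n = in_features, so
    # the pre-activations of sin(omega_0 * (Wx + b)) stay well-scaled.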
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch_in': 4, 'ch_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
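    # Editorial note: epilogue fused onto the 1x1 conv. Adds the
    # per-channel bias (x1 = channel of the (4, 4, 4, 4) buffer), scales by
    # omega_0 = 30.0 (baked in at trace time), and stores both the biased
    # conv output (kept for backward) and sin(30 * x).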
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sin_0[grid(256)](buf1, primals_2,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf1
def act(act_fun='LeakyReLU'):
"""
    Either a string naming an activation function or an nn.Module class (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
    Thin wrapper around torch.tanh so the activation can be selected by name via act('tanh').
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
    Sine activation, sin(x); the default nonlinearity used by SIREN_layer.
"""
def __init__(self):
super(Sin, self).__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layerNew(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super(SIREN_layerNew, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
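# Illustrative check (editorial; the helper name is hypothetical, and it
# assumes a CUDA device, since call() pins cuda:0 and asserts contiguous
# strides):
def _example_siren_layer():
    layer = SIREN_layerNew(ch_in=4, ch_out=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return layer(x)  # equals torch.sin(30 * layer.conv1(x))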
| dustlrdk/noise2self | SIREN_layer | false | 3,445 | [
"MIT"
] | 0 | 46e8c4650f7ec4f664448417fecd39b4cae477f7 | https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7 | import torch
import numpy as np
import torch.nn as nn
def act(act_fun='LeakyReLU'):
"""
    Either a string naming an activation function or an nn.Module class (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super().__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
    Thin wrapper around torch.tanh so the activation can be selected by name via act('tanh').
"""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
    Sine activation, sin(x); the default nonlinearity used by SIREN_layer.
"""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.sin(x)
class Model(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super().__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ns/cns2e6t63to7326mqrrbrukx4bu4zf2kd446tymg6aua6u7rlxyr.py
# Topologically Sorted Source Nodes: [conv2d, tanh], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# conv2d => convolution
# tanh => tanh
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_tanh_0 = async_compile.triton('triton_poi_fused_convolution_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wy/cwyx3wa4jndgnwzcjpr33hhlviahccyeckxfax46ztwjbjc22gd7.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/n5/cn53lh5pa2vl5a6c3hojdjif6ggeo3nogqndrjwgnbauidocwf7m.py
# Topologically Sorted Source Nodes: [conv2d_1, tanh_1], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# tanh_1 => tanh_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_tanh_2 = async_compile.triton('triton_poi_fused_convolution_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5x/c5xqmogb4ei265h7d4tbtgbg3ybcwnd6logewpzvenzqwvangf2k.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%tanh_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tr/ctrwo7eblwgormkgc6rkz3mbvcqxgbihf45vdjyrhnv7sr6x6g4c.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out_3 => tanh_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_4 = async_compile.triton('triton_poi_fused_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (8, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (32, 512), (512, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (2, 32), (32, 1))
assert_size_stride(primals_9, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, tanh], Original ATen: [aten.convolution, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_tanh_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 8, 32, 32), (8192, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, tanh_1], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_2.run(buf5, primals_5, 32768, grid=grid(32768), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.int8)
buf7 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 8192, grid=grid(8192), stream=stream0)
buf8 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (16, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 32), (1, 512), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_4.run(buf9, primals_7, 512, grid=grid(512), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 512), (512, 1), 0), buf9, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.fc1 = nn.Linear(8 * 8 * 8, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.tanh(self.conv1(x)), 2)
out = F.max_pool2d(torch.tanh(self.conv2(out)), 2)
out = out.view(-1, 8 * 8 * 8)
out = torch.tanh(self.fc1(out))
out = self.fc2(out)
return out
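# Shape note (editorial): with the 64x64 inputs from get_inputs(), the two
# 2x2 pools leave (4, 8, 16, 16) = 8192 elements, so view(-1, 8 * 8 * 8)
# folds the batch into 16 rows of 512 and the network returns a (16, 2)
# tensor for a batch of 4 (buf10 in the compiled call() below).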
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
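    # Editorial note: 2x2 / stride-2 max pool over the 64-wide feature map.
    # x0 is the output column, x1 the flat batch*channel*output-row index;
    # each window reads offsets {0, 1, 64, 65}. The int8 output records
    # which of the four window slots (0-3) held the max, for the backward.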
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
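    # Editorial note: same 2x2 pool on the 32-wide second-stage map, but the
    # outputs are swapped relative to the first pool kernel: out_ptr0 gets
    # the int8 argmax slots and out_ptr1 the pooled values (buf6/buf7 below).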
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (8, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (32, 512), (512, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (2, 32), (32, 1))
assert_size_stride(primals_9, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_tanh_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2,
buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 8, 32, 32), (8192, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_tanh_2[grid(32768)](buf5, primals_5,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch
.int8)
buf7 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(8192)](buf5, buf6,
buf7, 8192, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 512), (512, 1), 0),
reinterpret_tensor(primals_6, (512, 32), (1, 512), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_tanh_4[grid(512)](buf9, primals_7, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(32, 2), (1, 32), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (16, 512), (512, 1), 0), buf9,
primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.fc1 = nn.Linear(8 * 8 * 8, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, input_0):
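        # forward() bypasses the eager graph: it gathers the module's weights together with the
        # input into the flat primals list expected by the AOT-compiled call() above.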
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| dustasa/senior_software_HW | Net | false | 3,446 | [
"Apache-2.0"
] | 0 | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.fc1 = nn.Linear(8 * 8 * 8, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.tanh(self.conv1(x)), 2)
out = F.max_pool2d(torch.tanh(self.conv2(out)), 2)
out = out.view(-1, 8 * 8 * 8)
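        # NOTE: with the 64x64 inputs from get_inputs() the tensor here is (4, 8, 16, 16), so this
        # view yields 16 rows of 512 features (matching the compiled call above), not 4.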
out = torch.tanh(self.fc1(out))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
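# A minimal sketch of how this harness is typically driven (an assumption, not part of the
# original source):
#   model = Model()
#   out = model(*get_inputs())  # -> logits of shape (16, 2) for the 64x64 inputs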
|
GE2ELoss | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xs/cxsrpt6fc6keotwox53jqah3sh3dxvxtz3kwc2blhwib6t3qy42k.py
# Topologically Sorted Source Nodes: [sub, centroids_1], Original ATen: [aten.sub, aten.div]
# Source node to ATen node mapping:
# centroids_1 => div
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %primals_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 3), kwargs = {})
triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 - tmp7
tmp9 = 0.3333333333333333
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
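# The 0.333... factor above is 1 / (num_utterances - 1) for the 4-utterance inputs: the kernel
# forms the leave-one-out utterance centroids, (sum over utterances - the utterance itself) / 3.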
# kernel path: runs/run_shard_7/inductor_cache/dv/cdvje63sjkelf3el4u53unnyfy2sevhogihzqbwx6hsyrbsy7cbg.py
# Topologically Sorted Source Nodes: [cos_same], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
# Source node to ATen node mapping:
# cos_same => clamp_min_1, clamp_min_2, div_1, div_2, mul, pow_1, pow_2, pow_3, pow_4, sum_2, sum_3
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %clamp_min_2), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %clamp_min_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %div_1), kwargs = {})
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x2), xmask)
tmp17 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bd/cbdptvqpyegwucijcr6uwn6bfx3eo4vhu5rbuhfpm7rxzwz5eeqd.py
# Topologically Sorted Source Nodes: [centroids, centroids_expand, cos_diff], Original ATen: [aten.mean, aten.repeat, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# centroids => mean
# centroids_expand => repeat
# cos_diff => pow_7, sum_6
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [1]), kwargs = {})
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%mean, [16, 1]), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%repeat, 2), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1], True), kwargs = {})
triton_poi_fused_linalg_vector_norm_mean_repeat_2 = async_compile.triton('triton_poi_fused_linalg_vector_norm_mean_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_mean_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_linalg_vector_norm_mean_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*(x0 % 4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (5 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (9 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (13 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (2 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (6 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (10 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (14 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (3 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (7 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (11 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (15 + (16*(x0 % 4))), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp8 * tmp8
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp16 / tmp7
tmp18 = tmp17 * tmp17
tmp19 = tmp9 + tmp18
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp27 * tmp27
tmp29 = tmp19 + tmp28
tmp32 = tmp30 + tmp31
tmp34 = tmp32 + tmp33
tmp36 = tmp34 + tmp35
tmp37 = tmp36 / tmp7
tmp38 = tmp37 * tmp37
tmp39 = tmp29 + tmp38
tl.store(out_ptr0 + (x0), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nh/cnhrs5oppznwegybmgag7bb4gbadinbwe6rek6ylphkotmgrmcpa.py
# Topologically Sorted Source Nodes: [centroids, centroids_expand, cos_diff], Original ATen: [aten.mean, aten.repeat, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
# Source node to ATen node mapping:
# centroids => mean
# centroids_expand => repeat
# cos_diff => clamp_min_3, clamp_min_4, div_3, div_4, mul_1, pow_5, pow_6, pow_8, sum_5
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [1]), kwargs = {})
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%mean, [16, 1]), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_6, 0.5), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_6, 1e-08), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_8, 1e-08), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%repeat, %clamp_min_4), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_3, %clamp_min_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, %div_3), kwargs = {})
triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 // 4))), xmask)
tmp1 = tl.load(in_ptr0 + (4*(x1 // 4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*(x1 // 4))), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*(x1 // 4))), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*(x1 // 4))), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (x0 + (16*(x1 % 4))), xmask)
tmp17 = tl.load(in_ptr0 + (4 + x0 + (16*(x1 % 4))), xmask)
tmp19 = tl.load(in_ptr0 + (8 + x0 + (16*(x1 % 4))), xmask)
tmp21 = tl.load(in_ptr0 + (12 + x0 + (16*(x1 % 4))), xmask)
tmp25 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp26 = libdevice.sqrt(tmp25)
tmp27 = triton_helpers.maximum(tmp26, tmp13)
tmp28 = tmp24 / tmp27
tmp29 = tmp15 * tmp28
tl.store(out_ptr0 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ir/cirx4jc3ufwibutztgyf7asd3mqxh5khmvscvbnbn4jwbtzmxwex.py
# Topologically Sorted Source Nodes: [cos_diff], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# cos_diff => sum_7
# Graph fragment:
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_poi_fused_sum_4 = async_compile.triton('triton_poi_fused_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ql/cqls67w2vyuytuch5eevuasqxfpvx6dzsfraeuvrtvwpy4pinmbt.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.index_put]
# Source node to ATen node mapping:
# setitem => index_put
# Graph fragment:
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%view_4, [%lift_fresh_copy, None, %lift_fresh_copy_1], %view_5), kwargs = {})
triton_poi_fused_index_put_5 = async_compile.triton('triton_poi_fused_index_put_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_5', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
x0 = xindex % 4
tmp11 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + ((4*x0) + (17*tmp10)), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lc/clc6rso4dneipezyns6n5nqji3ursl6dn5npzhlm6jd7b6k2a73q.py
# Topologically Sorted Source Nodes: [cos_diff_2, mul, sim_matrix, pos, exp, sum_2, add_2, neg, sub_1, per_embedding_loss, loss], Original ATen: [aten.add, aten.mul, aten.index, aten.exp, aten.sum, aten.log, aten.sub]
# Source node to ATen node mapping:
# add_2 => add_2
# cos_diff_2 => add
# exp => exp
# loss => sum_9
# mul => mul_2
# neg => log
# per_embedding_loss => mul_3
# pos => index
# sim_matrix => add_1
# sub_1 => sub_1
# sum_2 => sum_8
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, 1e-06), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %add), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_3), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%add_1, [%lift_fresh_copy_2, None, %lift_fresh_copy_3]), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%add_1,), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2]), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, 1e-06), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%index, %log), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, -1), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_exp_index_log_mul_sub_sum_6 = async_compile.triton('triton_per_fused_add_exp_index_log_mul_sub_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_index_log_mul_sub_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_exp_index_log_mul_sub_sum_6(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = (rindex // 4)
r0 = rindex % 4
r2 = rindex
tmp11 = tl.load(in_ptr0 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp17 = tl.load(in_ptr2 + (0))
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.load(in_ptr1 + (4*r2), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (1 + (4*r2)), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (2 + (4*r2)), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (3 + (4*r2)), None, eviction_policy='evict_last')
tmp0 = r1
tmp1 = tl.full([1, 1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1, 1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1, 1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp13 = tl.load(in_ptr1 + ((4*r0) + (17*tmp10)), None, eviction_policy='evict_last')
tmp14 = 1e-06
tmp15 = tmp13 + tmp14
tmp16 = tmp12 * tmp15
tmp19 = tmp16 + tmp18
tmp21 = tmp20 + tmp14
tmp22 = tmp12 * tmp21
tmp23 = tmp22 + tmp18
tmp24 = tl_math.exp(tmp23)
tmp26 = tmp25 + tmp14
tmp27 = tmp12 * tmp26
tmp28 = tmp27 + tmp18
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp24 + tmp29
tmp32 = tmp31 + tmp14
tmp33 = tmp12 * tmp32
tmp34 = tmp33 + tmp18
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp30 + tmp35
tmp38 = tmp37 + tmp14
tmp39 = tmp12 * tmp38
tmp40 = tmp39 + tmp18
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp36 + tmp41
tmp43 = tmp42 + tmp14
tmp44 = tl_math.log(tmp43)
tmp45 = tmp19 - tmp44
tmp46 = -1.0
tmp47 = tmp45 * tmp46
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp50 = tl.sum(tmp48, 1)[:, None]
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp50, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, centroids_1], Original ATen: [aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sub_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cos_same], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1.run(primals_2, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (64, 1), (1, 64), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [centroids, centroids_expand, cos_diff], Original ATen: [aten.mean, aten.repeat, aten.linalg_vector_norm]
triton_poi_fused_linalg_vector_norm_mean_repeat_2.run(primals_2, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [centroids, centroids_expand, cos_diff], Original ATen: [aten.mean, aten.repeat, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3.run(primals_2, buf2, buf3, 256, grid=grid(256), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf2, (64, ), (1, ), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [cos_diff], Original ATen: [aten.sum]
triton_poi_fused_sum_4.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.index_put]
triton_poi_fused_index_put_5.run(buf1, buf4, 16, grid=grid(16), stream=stream0)
del buf1
buf7 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [cos_diff_2, mul, sim_matrix, pos, exp, sum_2, add_2, neg, sub_1, per_embedding_loss, loss], Original ATen: [aten.add, aten.mul, aten.index, aten.exp, aten.sum, aten.log, aten.sub]
triton_per_fused_add_exp_index_log_mul_sub_sum_6.run(primals_1, buf4, primals_3, buf7, 1, 16, grid=grid(1), stream=stream0)
return (buf7, primals_1, primals_3, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
def calc_loss(sim_matrix):
same_idx = list(range(sim_matrix.size(0)))
pos = sim_matrix[same_idx, :, same_idx]
neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_()
per_embedding_loss = -1 * (pos - neg)
loss = per_embedding_loss.sum()
return loss, per_embedding_loss
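# Per-embedding GE2E softmax loss: L(e_ji) = -S(ji, j) + log(sum_k exp(S(ji, k)) + 1e-6), which
# pulls each utterance toward its own speaker's centroid and away from the other centroids.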
def get_centroids(embeddings):
centroids = embeddings.mean(dim=1)
return centroids
def get_utterance_centroids(embeddings):
"""
    Returns the centroid for each utterance of a speaker, where the
    utterance centroid is the speaker centroid computed without that
    utterance (leave-one-out).
Shape of embeddings should be:
(speaker_ct, utterance_per_speaker_ct, embedding_size)
"""
sum_centroids = embeddings.sum(dim=1)
sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1,
sum_centroids.shape[-1])
num_utterances = embeddings.shape[1] - 1
centroids = (sum_centroids - embeddings) / num_utterances
return centroids
def get_cossim(embeddings, centroids):
num_utterances = embeddings.shape[1]
utterance_centroids = get_utterance_centroids(embeddings)
utterance_centroids_flat = utterance_centroids.view(utterance_centroids
.shape[0] * utterance_centroids.shape[1], -1)
embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1)
cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)
centroids_expand = centroids.repeat((num_utterances * embeddings.shape[
0], 1))
embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.
shape[0], 1)
embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] *
embeddings_expand.shape[1], embeddings_expand.shape[-1])
cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)
cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids.
size(0))
same_idx = list(range(embeddings.size(0)))
cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0],
num_utterances)
cos_diff = cos_diff + 1e-06
return cos_diff
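# cos_diff has shape (speaker_ct, utterance_per_speaker_ct, speaker_ct); entry [j, i, k] is the
# cosine similarity between utterance e_ji and speaker k's centroid (leave-one-out when k == j).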
class GE2ELoss(nn.Module):
def __init__(self, device):
super(GE2ELoss, self).__init__()
self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True)
self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True)
self.device = device
def forward(self, embeddings):
torch.clamp(self.w, 1e-06)
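        # NOTE: torch.clamp is out-of-place, so this statement discards its result and leaves
        # self.w unchanged; it is preserved here as written in the upstream source.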
centroids = get_centroids(embeddings)
cossim = get_cossim(embeddings, centroids)
sim_matrix = self.w * cossim + self.b
loss, _ = calc_loss(sim_matrix)
return loss
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 - tmp7
tmp9 = 0.3333333333333333
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + x2, xmask)
tmp17 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_linalg_vector_norm_mean_repeat_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * (x0 % 4), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr0 + (4 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (12 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (5 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (9 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (13 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (2 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (6 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (10 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (14 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (3 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr0 + (7 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr0 + (11 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr0 + (15 + 16 * (x0 % 4)), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp8 * tmp8
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp16 / tmp7
tmp18 = tmp17 * tmp17
tmp19 = tmp9 + tmp18
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp27 * tmp27
tmp29 = tmp19 + tmp28
tmp32 = tmp30 + tmp31
tmp34 = tmp32 + tmp33
tmp36 = tmp34 + tmp35
tmp37 = tmp36 / tmp7
tmp38 = tmp37 * tmp37
tmp39 = tmp29 + tmp38
tl.store(out_ptr0 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3(
        in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tmp1 = tl.load(in_ptr0 + 4 * (x1 // 4), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (1 + 4 * (x1 // 4)), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * (x1 // 4)), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * (x1 // 4)), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (x0 + 16 * (x1 % 4)), xmask)
tmp17 = tl.load(in_ptr0 + (4 + x0 + 16 * (x1 % 4)), xmask)
tmp19 = tl.load(in_ptr0 + (8 + x0 + 16 * (x1 % 4)), xmask)
tmp21 = tl.load(in_ptr0 + (12 + x0 + 16 * (x1 % 4)), xmask)
tmp25 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp26 = libdevice.sqrt(tmp25)
tmp27 = triton_helpers.maximum(tmp26, tmp13)
tmp28 = tmp24 / tmp27
tmp29 = tmp15 * tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_index_put_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
x0 = xindex % 4
tmp11 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + (4 * x0 + 17 * tmp10), tmp17, xmask)
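    # Offset 4 * x0 + 17 * tmp10 is the flattened index of cos_diff[s, u, s] under strides
    # (16, 4, 1) (16*s + 4*u + s), i.e. the diagonal write cos_diff[same_idx, :, same_idx].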
@triton.jit
def triton_per_fused_add_exp_index_log_mul_sub_sum_6(in_ptr0, in_ptr1,
in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4
r0 = rindex % 4
r2 = rindex
tmp11 = tl.load(in_ptr0 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp17 = tl.load(in_ptr2 + 0)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.load(in_ptr1 + 4 * r2, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (1 + 4 * r2), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (2 + 4 * r2), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (3 + 4 * r2), None, eviction_policy='evict_last')
tmp0 = r1
tmp1 = tl.full([1, 1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1, 1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1, 1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp13 = tl.load(in_ptr1 + (4 * r0 + 17 * tmp10), None, eviction_policy=
'evict_last')
tmp14 = 1e-06
tmp15 = tmp13 + tmp14
tmp16 = tmp12 * tmp15
tmp19 = tmp16 + tmp18
tmp21 = tmp20 + tmp14
tmp22 = tmp12 * tmp21
tmp23 = tmp22 + tmp18
tmp24 = tl_math.exp(tmp23)
tmp26 = tmp25 + tmp14
tmp27 = tmp12 * tmp26
tmp28 = tmp27 + tmp18
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp24 + tmp29
tmp32 = tmp31 + tmp14
tmp33 = tmp12 * tmp32
tmp34 = tmp33 + tmp18
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp30 + tmp35
tmp38 = tmp37 + tmp14
tmp39 = tmp12 * tmp38
tmp40 = tmp39 + tmp18
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp36 + tmp41
tmp43 = tmp42 + tmp14
tmp44 = tl_math.log(tmp43)
tmp45 = tmp19 - tmp44
tmp46 = -1.0
tmp47 = tmp45 * tmp46
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp50 = tl.sum(tmp48, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp50, None)
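# The persistent reduction above fuses all of calc_loss: it rebuilds sim_matrix = w * (cos_diff
# + 1e-6) + b on the fly, reads the diagonal "pos" entries through the same 17 * s + 4 * u
# indexing as the index_put kernel, and reduces sum(-(pos - log(sum(exp(sim), dim=2) + 1e-6)))
# to a single scalar.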
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sub_0[grid(64)](primals_2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1[grid(64)](
primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (64, 1), (1, 64), 0)
del buf0
triton_poi_fused_linalg_vector_norm_mean_repeat_2[grid(64)](primals_2,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3[
grid(256)](primals_2, buf2, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf2, (64,), (1,), 0)
del buf2
triton_poi_fused_sum_4[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf3
triton_poi_fused_index_put_5[grid(16)](buf1, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
buf7 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_exp_index_log_mul_sub_sum_6[grid(1)](primals_1,
buf4, primals_3, buf7, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
return buf7, primals_1, primals_3, reinterpret_tensor(buf4, (4, 4, 4),
(16, 4, 1), 0)
def calc_loss(sim_matrix):
same_idx = list(range(sim_matrix.size(0)))
pos = sim_matrix[same_idx, :, same_idx]
neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_()
per_embedding_loss = -1 * (pos - neg)
loss = per_embedding_loss.sum()
return loss, per_embedding_loss
def get_centroids(embeddings):
centroids = embeddings.mean(dim=1)
return centroids
def get_utterance_centroids(embeddings):
"""
    Returns the centroid for each utterance of a speaker, where the
    utterance centroid is the speaker centroid computed without that
    utterance (leave-one-out).
Shape of embeddings should be:
(speaker_ct, utterance_per_speaker_ct, embedding_size)
"""
sum_centroids = embeddings.sum(dim=1)
sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1,
sum_centroids.shape[-1])
num_utterances = embeddings.shape[1] - 1
centroids = (sum_centroids - embeddings) / num_utterances
return centroids
def get_cossim(embeddings, centroids):
num_utterances = embeddings.shape[1]
utterance_centroids = get_utterance_centroids(embeddings)
utterance_centroids_flat = utterance_centroids.view(utterance_centroids
.shape[0] * utterance_centroids.shape[1], -1)
embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1)
cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)
centroids_expand = centroids.repeat((num_utterances * embeddings.shape[
0], 1))
embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.
shape[0], 1)
embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] *
embeddings_expand.shape[1], embeddings_expand.shape[-1])
cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)
cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids.
size(0))
same_idx = list(range(embeddings.size(0)))
cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0],
num_utterances)
cos_diff = cos_diff + 1e-06
return cos_diff
class GE2ELossNew(nn.Module):
def __init__(self, device):
super(GE2ELossNew, self).__init__()
self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True)
self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True)
self.device = device
def forward(self, input_0):
primals_1 = self.w
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
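# A minimal usage sketch (an assumption: a CUDA device is available, since call() pins device 0):
#   criterion = GE2ELossNew(device=0).cuda()
#   loss = criterion(torch.rand(4, 4, 4, device='cuda'))  # scalar GE2E loss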
| dodo0822/PyTorch_Speaker_Verification | GE2ELoss | false | 3,447 | [
"BSD-3-Clause"
] | 0 | 5310f441894e77895de27380d31149629e309d0f | https://github.com/dodo0822/PyTorch_Speaker_Verification/tree/5310f441894e77895de27380d31149629e309d0f | import torch
import torch.nn.functional as F
import torch.nn as nn
def calc_loss(sim_matrix):
same_idx = list(range(sim_matrix.size(0)))
pos = sim_matrix[same_idx, :, same_idx]
neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_()
per_embedding_loss = -1 * (pos - neg)
loss = per_embedding_loss.sum()
return loss, per_embedding_loss
def get_centroids(embeddings):
centroids = embeddings.mean(dim=1)
return centroids
def get_utterance_centroids(embeddings):
"""
    Returns the centroid for each utterance of a speaker, where the
    utterance centroid is the speaker centroid computed without that
    utterance (leave-one-out).
Shape of embeddings should be:
(speaker_ct, utterance_per_speaker_ct, embedding_size)
"""
sum_centroids = embeddings.sum(dim=1)
sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1,
sum_centroids.shape[-1])
num_utterances = embeddings.shape[1] - 1
centroids = (sum_centroids - embeddings) / num_utterances
return centroids
def get_cossim(embeddings, centroids):
num_utterances = embeddings.shape[1]
utterance_centroids = get_utterance_centroids(embeddings)
utterance_centroids_flat = utterance_centroids.view(utterance_centroids
.shape[0] * utterance_centroids.shape[1], -1)
embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1)
cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)
centroids_expand = centroids.repeat((num_utterances * embeddings.shape[
0], 1))
embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.
shape[0], 1)
embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] *
embeddings_expand.shape[1], embeddings_expand.shape[-1])
cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)
cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids.
size(0))
same_idx = list(range(embeddings.size(0)))
cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0],
num_utterances)
cos_diff = cos_diff + 1e-06
return cos_diff
class Model(nn.Module):
def __init__(self, device):
super().__init__()
self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True)
self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True)
self.device = device
def forward(self, embeddings):
torch.clamp(self.w, 1e-06)
centroids = get_centroids(embeddings)
cossim = get_cossim(embeddings, centroids)
sim_matrix = self.w * cossim + self.b
loss, _ = calc_loss(sim_matrix)
return loss
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [0]
|
ReGLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qk/cqktl6a5kztlpt4hsz4gfob2rxmtfurvnwvwoend2jzbg534exfw.py
# Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.relu, aten.mul]
# Source node to ATen node mapping:
# g => relu
# x => mul
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %view_3), kwargs = {})
triton_poi_fused_mul_relu_0 = async_compile.triton('triton_poi_fused_mul_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.relu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_relu_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
# Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class ReGLU(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU(
), True, False, False, False)
def forward(self, x: 'torch.Tensor'):
return self.ffn(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
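# Hypothetical equivalence check (illustrative sketch): with the ReGLU wiring
# (gated, all biases off, dropout disabled) the module output should match
# relu(x @ W1^T) * (x @ V^T) @ W2^T computed by hand.
def _check_reglu():
    m = ReGLU(4, 4, dropout=0.0)
    x = torch.rand(2, 4)
    ffn = m.ffn
    manual = (torch.relu(x @ ffn.layer1.weight.T) *
              (x @ ffn.linear_v.weight.T)) @ ffn.layer2.weight.T
    assert torch.allclose(m(x), manual, atol=1e-06)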
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
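# Hypothetical sanity check (requires a CUDA device; sizes match the kernel's
# hard-coded xnumel=256): the fused kernel should agree with eager
# relu(a) * b, mirroring the launch in call() below.
def _check_fused_mul_relu():
    a = torch.rand(256, device='cuda')
    b = torch.rand(256, device='cuda')
    out = torch.empty_like(a)
    triton_poi_fused_mul_relu_0[grid(256)](a, b, out, 256, XBLOCK=128,
        num_warps=4, num_stages=1)
    assert torch.allclose(out, torch.relu(a) * b)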
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_relu_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
# Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class ReGLUNew(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU(
), True, False, False, False)
def forward(self, input_0):
primals_1 = self.ffn.layer1.weight
primals_3 = self.ffn.layer2.weight
primals_4 = self.ffn.linear_v.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| edchengmoore/pytorch_tabular | ReGLU | false | 3,448 | [
"MIT"
] | 0 | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
# Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class Model(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU(
), True, False, False, False)
def forward(self, x: 'torch.Tensor'):
return self.ffn(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SwiGLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qy/cqycxwukc2ovy7fq4yqkhvz77tl2rhtpffsix7oclxznyexrjof4.py
# Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.silu, aten.mul]
# Source node to ATen node mapping:
# g => mul, sigmoid
# x => mul_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_3), kwargs = {})
triton_poi_fused_mul_silu_0 = async_compile.triton('triton_poi_fused_mul_silu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_silu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_silu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.silu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_silu_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
# Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class SwiGLU(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU(
), True, False, False, False)
def forward(self, x: 'torch.Tensor'):
return self.ffn(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
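# Hypothetical equivalence check (illustrative sketch): the SiLU gate is
# z * sigmoid(z), so with dropout disabled the module output should match
# (z * sigmoid(z)) * (x @ V^T) @ W2^T with z = x @ W1^T.
def _check_swiglu():
    m = SwiGLU(4, 4, dropout=0.0)
    x = torch.rand(2, 4)
    z = x @ m.ffn.layer1.weight.T
    manual = (z * torch.sigmoid(z) * (x @ m.ffn.linear_v.weight.T)
              ) @ m.ffn.layer2.weight.T
    assert torch.allclose(m(x), manual, atol=1e-06)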
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_silu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
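# Eager-mode reference for this fused kernel: for matching 256-element CUDA
# tensors a and b, the output equals torch.nn.functional.silu(a) * b, since
# tmp0 * sigmoid(tmp0) is exactly the SiLU activation.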
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_silu_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
# Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class SwiGLUNew(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU(
), True, False, False, False)
def forward(self, input_0):
primals_1 = self.ffn.layer1.weight
primals_3 = self.ffn.layer2.weight
primals_4 = self.ffn.linear_v.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| edchengmoore/pytorch_tabular | SwiGLU | false | 3,449 | [
"MIT"
] | 0 | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
# Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class Model(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU(
), True, False, False, False)
def forward(self, x: 'torch.Tensor'):
return self.ffn(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
NetDepth | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
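# Note: the convolution itself runs through extern_kernels.convolution with
# bias=None; this kernel then fuses the per-channel bias add (broadcast via
# the channel index x1) with the ReLU, updating the convolution output in
# place (in_out_ptr0).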
# kernel path: runs/run_shard_7/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
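# Note: the int8 output (getitem_1) stores, for each 2x2 pooling window, the
# row-major position (0-3) of the maximum within that window: a compact local
# offset rather than the flat index that
# torch.nn.functional.max_pool2d(..., return_indices=True) would return.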
# kernel path: runs/run_shard_7/inductor_cache/tk/ctkr5j63ngqpsdf5zpn24uwouxiguv7jseip5x6trqjye5gti4rn.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pw/cpwa7w2sf2mh2epi2372qi4mhvq3tcs5lmubo3et2g2uwsicveur.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/c3/cc37wvituo2asffgdbn2cnuhsr4nuj5pzt75pvxxxx4t7tdtdkqj.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6x/c6xcobufboro7n2mhslbvtpc7gufgkiceaae3uce353mlq76ufls.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kw/ckwepldgfwta7pknoev2cmp4hkrsvqxe62ykmjjh5tmllll3jhmf.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (32, 256), (256, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (2, 32), (32, 1))
assert_size_stride(primals_11, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 4096, grid=grid(4096), stream=stream0)
buf12 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (16, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 32), (1, 256), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(primals_10, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16, 256), (256, 1), 0), buf13, primals_10, primals_8, )
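# Shape flow through call() for the default (4, 3, 64, 64) input:
#   conv1+relu -> (4, 32, 64, 64)   pool -> (4, 32, 32, 32)
#   conv2+relu -> (4, 16, 32, 32)   pool -> (4, 16, 16, 16)
#   conv3+relu -> (4, 16, 16, 16)   pool -> (4, 16, 8, 8)
# The (4, 16, 8, 8) map holds 4096 values, which the (16, 256) reinterpret
# flattens exactly as the eager view(-1, 256) does: fc1 was sized for a 4x4
# map, so the effective batch grows to 16 and the output buf14 is (16, 2).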
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
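# The harness above builds rand_strided tensors matching the shapes and
# strides asserted in call(), then times repeated invocations through
# print_performance; it exercises only the compiled graph, not the nn.Module.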
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class NetDepth(nn.Module):
def __init__(self, n_chans1=32):
super().__init__()
self.n_chans1 = n_chans1
self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3,
padding=1)
self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.relu(self.conv1(x)), 2)
out = F.max_pool2d(torch.relu(self.conv2(out)), 2)
out = F.max_pool2d(torch.relu(self.conv3(out)), 2)
out = out.view(-1, 4 * 4 * self.n_chans1 // 2)
out = torch.relu(self.fc1(out))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
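# The convolution itself runs out of line via extern_kernels.convolution with
# bias=None; this kernel only adds the per-channel bias and applies ReLU in
# place. x1 recovers the channel index from the flat offset (4096 = 64*64
# spatial positions per channel, 32 channels), so in_ptr0 (the bias vector)
# is indexed once per channel.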
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
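# 2x2/stride-2 max pooling with argmax indices: the four loads fetch the
# window corners (offsets 0, 1, 64, 65 over a 64-wide row), tmp6 is the
# running maximum, and tmp16 encodes which of the four positions won as an
# int8 in {0, 1, 2, 3}. The indices are kept so max_pool2d's backward can
# route gradients; a hedged eager equivalent, up to the index encoding, is
#     vals, idx = F.max_pool2d(x, 2, return_indices=True)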
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
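# Same 2x2 pooling as above, but note the output order is swapped in this
# variant: the int8 argmax codes go through out_ptr0 and the pooled values
# through out_ptr1, matching the (buf10=int8, buf11=float32) buffers below.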
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (32, 256), (256, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (2, 32), (32, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2,
buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6,
buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
        buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10,
buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 32), (1, 256), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(512)](buf13, primals_9, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(
primals_10, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16,
256), (256, 1), 0), buf13, primals_10, primals_8)
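# A minimal eager-mode sketch of what call() computes, handy for spot-checking
# the compiled path. This helper is hypothetical (not emitted by Inductor);
# its arguments follow the primals_* order used above.
def _reference_forward(x, w1, b1, w2, b2, w3, b3, fc1_w, fc1_b, fc2_w, fc2_b):
    import torch.nn.functional as F
    out = F.max_pool2d(torch.relu(F.conv2d(x, w1, b1, padding=1)), 2)
    out = F.max_pool2d(torch.relu(F.conv2d(out, w2, b2, padding=1)), 2)
    out = F.max_pool2d(torch.relu(F.conv2d(out, w3, b3, padding=1)), 2)
    out = torch.relu(F.linear(out.view(-1, 256), fc1_w, fc1_b))
    return F.linear(out, fc2_w, fc2_b)  # (16, 2), matching buf14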
class NetDepthNew(nn.Module):
def __init__(self, n_chans1=32):
super().__init__()
self.n_chans1 = n_chans1
self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3,
padding=1)
self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| dustasa/senior_software_HW | NetDepth | false | 3,450 | [
"Apache-2.0"
] | 0 | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_chans1=32):
super().__init__()
self.n_chans1 = n_chans1
self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3,
padding=1)
self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.relu(self.conv1(x)), 2)
out = F.max_pool2d(torch.relu(self.conv2(out)), 2)
out = F.max_pool2d(torch.relu(self.conv3(out)), 2)
out = out.view(-1, 4 * 4 * self.n_chans1 // 2)
out = torch.relu(self.fc1(out))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
NetWidth | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gn/cgnvlzmo5ztw5s272ll3bpa2mrwfbo5qzmrahw4jghss7ba2s55x.py
# Topologically Sorted Source Nodes: [conv2d, tanh], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# conv2d => convolution
# tanh => tanh
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_tanh_0 = async_compile.triton('triton_poi_fused_convolution_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
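# As in the ReLU variants elsewhere in this dump, the convolution runs out of
# line and this kernel only folds in the per-channel bias; the activation here
# is tanh, computed through libdevice.tanh (the device-side tanh from NVIDIA's
# libdevice) rather than a maximum against zero.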
# kernel path: runs/run_shard_7/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2g/c2gjtptvoismunimcke536q3isi4rj6vrtgwkkiebpeiintksopc.py
# Topologically Sorted Source Nodes: [conv2d_1, tanh_1], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# tanh_1 => tanh_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_tanh_2 = async_compile.triton('triton_poi_fused_convolution_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ec/cecaw7x564jfwuidwdxtkrmanoelm5tnnaxaobkxqxnnoobdi5sf.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%tanh_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tr/ctrwo7eblwgormkgc6rkz3mbvcqxgbihf45vdjyrhnv7sr6x6g4c.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out_3 => tanh_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_4 = async_compile.triton('triton_poi_fused_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 1024), (1024, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (2, 32), (32, 1))
assert_size_stride(primals_9, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, tanh], Original ATen: [aten.convolution, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_tanh_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, tanh_1], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_2.run(buf5, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 16384, grid=grid(16384), stream=stream0)
buf8 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), reinterpret_tensor(primals_6, (1024, 32), (1, 1024), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_4.run(buf9, primals_7, 512, grid=grid(512), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), buf9, primals_8, primals_6, )
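# Shape flow for NetWidth with the default (4, 3, 64, 64) input:
#   conv1+tanh -> (4, 32, 64, 64)   pool -> (4, 32, 32, 32)
#   conv2+tanh -> (4, 16, 32, 32)   pool -> (4, 16, 16, 16)
# The pooled map holds 4 * 16 * 16 * 16 = 16384 values; fc1 was sized for an
# 8x8 map (16 * 8 * 8 = 1024), so the (16, 1024) reinterpret yields 16 rows,
# exactly like the eager view(-1, 1024), and the final buf10 is (16, 2).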
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class NetWidth(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.fc1 = nn.Linear(16 * 8 * 8, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.tanh(self.conv1(x)), 2)
out = F.max_pool2d(torch.tanh(self.conv2(out)), 2)
out = out.view(-1, 16 * 8 * 8)
out = torch.tanh(self.fc1(out))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 1024), (1024, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (2, 32), (32, 1))
assert_size_stride(primals_9, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_tanh_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2,
buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_tanh_2[grid(65536)](buf5, primals_5,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6,
buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0),
            reinterpret_tensor(primals_6, (1024, 32), (1, 1024), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_tanh_4[grid(512)](buf9, primals_7, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(32, 2), (1, 32), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), buf9,
primals_8, primals_6)
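# Hypothetical eager-mode counterpart of call() above (a sketch for checking
# numerics, not generated code); arguments follow the primals_* order.
def _reference_forward_width(x, w1, b1, w2, b2, fc1_w, fc1_b, fc2_w, fc2_b):
    import torch.nn.functional as F
    out = F.max_pool2d(torch.tanh(F.conv2d(x, w1, b1, padding=1)), 2)
    out = F.max_pool2d(torch.tanh(F.conv2d(out, w2, b2, padding=1)), 2)
    out = torch.tanh(F.linear(out.view(-1, 1024), fc1_w, fc1_b))
    return F.linear(out, fc2_w, fc2_b)  # (16, 2), matching buf10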
class NetWidthNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.fc1 = nn.Linear(16 * 8 * 8, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| dustasa/senior_software_HW | NetWidth | false | 3,451 | [
"Apache-2.0"
] | 0 | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.fc1 = nn.Linear(16 * 8 * 8, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.tanh(self.conv1(x)), 2)
out = F.max_pool2d(torch.tanh(self.conv2(out)), 2)
out = out.view(-1, 16 * 8 * 8)
out = torch.tanh(self.fc1(out))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
NetRes | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tk/ctkr5j63ngqpsdf5zpn24uwouxiguv7jseip5x6trqjye5gti4rn.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pw/cpwa7w2sf2mh2epi2372qi4mhvq3tcs5lmubo3et2g2uwsicveur.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
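# Eager-mode sketch of this kernel (illustrative name, not part of the
# generated module): a 2x2, stride-2 max pool that also records the winning
# window position. Note that inductor stores a compact 0..3 offset per window
# in the int8 buffer, whereas F.max_pool2d returns flat input indices:
def _ref_maxpool2x2(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)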
# kernel path: runs/run_shard_7/inductor_cache/bd/cbd2t3bbfbaqjthluzw333ldqhy772mrn426hesnz3t7bxi7swbf.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2, add], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_2, %getitem_2), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 16
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp8, None)
''', device_str='cuda')
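# Sketch of the fused math (illustrative name, not part of the generated
# module): out = relu(conv_out + bias) + residual, plus the boolean mask
# (relu output <= 0) that aten.threshold_backward uses to zero gradients in
# the backward pass:
def _ref_residual_relu(conv_out, bias, residual):
    import torch
    activated = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return activated + residual, activated <= 0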
# kernel path: runs/run_shard_7/inductor_cache/6x/c6xcobufboro7n2mhslbvtpc7gufgkiceaae3uce353mlq76ufls.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%add, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
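# Same 2x2, stride-2 pooling as triton_poi_fused_max_pool2d_with_indices_3,
# but with the argmax bookkeeping interleaved with the running maximum; note
# that the output roles are swapped here (out_ptr0 holds the int8 indices and
# out_ptr1 the pooled values).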
# kernel path: runs/run_shard_7/inductor_cache/kw/ckwepldgfwta7pknoev2cmp4hkrsvqxe62ykmjjh5tmllll3jhmf.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
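# Unlike the larger kernels above, this one keeps an explicit bounds mask
# (xmask = xindex < xnumel): with only 512 elements, the autotuned XBLOCK may
# exceed xnumel, so out-of-range lanes must be masked on load and store.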
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (32, 256), (256, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (2, 32), (32, 1))
assert_size_stride(primals_11, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
buf15 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, relu_2, add], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_4.run(buf8, primals_7, buf6, buf9, buf15, 16384, grid=grid(16384), stream=stream0)
del buf8
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 4096, grid=grid(4096), stream=stream0)
buf12 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (16, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 32), (1, 256), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(primals_10, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16, 256), (256, 1), 0), buf13, primals_10, primals_8, buf15, )
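# The first element (buf14) is the forward output; the remaining tensors
# (weights, intermediate activations, the int8 pooling indices buf3/buf7/buf10,
# and the buf15 ReLU mask) are saved for the autograd backward pass.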
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class NetRes(nn.Module):
def __init__(self, n_chans1=32):
super().__init__()
self.n_chans1 = n_chans1
self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3,
padding=1)
self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.relu(self.conv1(x)), 2)
out = F.max_pool2d(torch.relu(self.conv2(out)), 2)
out1 = out
out = F.max_pool2d(torch.relu(self.conv3(out)) + out1, 2)
out = out.view(-1, 4 * 4 * self.n_chans1 // 2)
out = torch.relu(self.fc1(out))
out = self.fc2(out)
return out
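# Shape note for the 64x64 input from get_inputs() below: after three 2x2
# pools the feature map is (4, 16, 8, 8), i.e. 1024 values per image, but
# view(-1, 4 * 4 * 16) flattens it to (16, 256). The model was sized for a
# 32x32 input, so with 64x64 inputs the leading dimension of the output
# becomes 16 rather than the batch size 4.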
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_4(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 16
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp8, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (32, 256), (256, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (2, 32), (32, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2,
buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6,
buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf15 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(16384)
](buf8, primals_7, buf6, buf9, buf15, 16384, XBLOCK=256,
num_warps=4, num_stages=1)
del buf8
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10,
buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 32), (1, 256), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(512)](buf13, primals_9, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(
primals_10, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16,
256), (256, 1), 0), buf13, primals_10, primals_8, buf15)
class NetResNew(nn.Module):
def __init__(self, n_chans1=32):
super().__init__()
self.n_chans1 = n_chans1
self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3,
padding=1)
self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
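# Example usage (a minimal sketch; assumes a CUDA device, since call() pins
# every buffer to cuda:0):
#   model = NetResNew().cuda()
#   out = model(torch.rand(4, 3, 64, 64, device='cuda'))
#   # out has shape (16, 2); see the flattening note on NetRes.forward above.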
| dustasa/senior_software_HW | NetRes | false | 3,452 | [
"Apache-2.0"
] | 0 | 767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed | import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_chans1=32):
super().__init__()
self.n_chans1 = n_chans1
self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3,
padding=1)
self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
out = F.max_pool2d(torch.relu(self.conv1(x)), 2)
out = F.max_pool2d(torch.relu(self.conv2(out)), 2)
out1 = out
out = F.max_pool2d(torch.relu(self.conv3(out)) + out1, 2)
out = out.view(-1, 4 * 4 * self.n_chans1 // 2)
out = torch.relu(self.fc1(out))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
L2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dp/cdpeuq4j7q7tnznu5hof2odasq3diuuwzm3n6m4b6p2gzl6rt2dv.py
# Topologically Sorted Source Nodes: [sub, norm, lossvalue], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.mean]
# Source node to ATen node mapping:
# lossvalue => mean
# norm => pow_1, pow_2, sum_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
triton_per_fused_linalg_vector_norm_mean_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = 64.0
tmp24 = tmp22 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
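# Eager-mode sketch of the fused reduction (illustrative name; assumes the
# fixed (4, 4, 4, 4) contiguous shapes asserted in call() below):
def _ref_l2_loss(output, target):
    import torch
    # The four loads at offsets 0/16/32/48 above unroll the channel dimension;
    # the sum over RBLOCK = 64 then averages across batch and spatial positions.
    return torch.norm(output - target, p=2, dim=1).mean()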
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, norm, lossvalue], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class L2(nn.Module):
def __init__(self):
super(L2, self).__init__()
def forward(self, output, target):
lossvalue = torch.norm(output - target, p=2, dim=1).mean()
return lossvalue
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = 64.0
tmp24 = tmp22 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
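# buf1 is a 0-dim (scalar) float32 tensor holding the mean L2 loss.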
class L2New(nn.Module):
def __init__(self):
super(L2New, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| eight0153/flownet2-pytorch | L2 | false | 3,453 | [
"Apache-2.0"
] | 0 | cc2964233cd18c8db05d1751281c6ab9d3165da6 | https://github.com/eight0153/flownet2-pytorch/tree/cc2964233cd18c8db05d1751281c6ab9d3165da6 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
lossvalue = torch.norm(output - target, p=2, dim=1).mean()
return lossvalue
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SIREN_CONV | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ep/ceps7alcqo6ixp7pux2qjaafeqqlxkyzwoeb2nun7wvzniy6qmmk.py
# Topologically Sorted Source Nodes: [x, mul, input_1], Original ATen: [aten.convolution, aten.mul, aten.sin]
# Source node to ATen node mapping:
# input_1 => sin
# mul => mul
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 30), kwargs = {})
# %sin : [num_users=2] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_convolution_mul_sin_0 = async_compile.triton('triton_poi_fused_convolution_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sin_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp5, None)
''', device_str='cuda')
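# Eager-mode sketch of the fused epilogue (illustrative name; omega_0 = 30 as
# in SIREN_layer below). The kernel writes the biased pre-activation back into
# the conv buffer and sin(omega_0 * pre) into a fresh buffer: the backward
# pass of sin needs the pre-activation to evaluate its cosine derivative.
def _ref_siren_act(conv_out, bias, omega_0=30.0):
    import torch
    pre = conv_out + bias.view(1, -1, 1, 1)
    return pre, torch.sin(omega_0 * pre)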
# kernel path: runs/run_shard_7/inductor_cache/5t/c5t3onjsz7g4kgxerd23n3657yawwyguukgmxnhop4b73u3qinph.py
# Topologically Sorted Source Nodes: [x_1, mul_1, input_2], Original ATen: [aten.convolution, aten.mul, aten.sin]
# Source node to ATen node mapping:
# input_2 => sin_1
# mul_1 => mul_1
# x_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%sin, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 30), kwargs = {})
# %sin_1 : [num_users=2] = call_function[target=torch.ops.aten.sin.default](args = (%mul_1,), kwargs = {})
triton_poi_fused_convolution_mul_sin_1 = async_compile.triton('triton_poi_fused_convolution_mul_sin_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sin_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sin_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/l6/cl6hqobzfbynrauju3moqist73fdf3ff6zlakiewojjzu6bq24k6.py
# Topologically Sorted Source Nodes: [x_2, mul_2, input_3], Original ATen: [aten.convolution, aten.mul, aten.sin]
# Source node to ATen node mapping:
# input_3 => sin_2
# mul_2 => mul_2
# x_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%sin_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 30), kwargs = {})
# %sin_2 : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul_2,), kwargs = {})
triton_poi_fused_convolution_mul_sin_2 = async_compile.triton('triton_poi_fused_convolution_mul_sin_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sin_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sin_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
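# Same fusion as the two kernels above, but with an explicit xmask: the final
# layer produces only 4 * 4 * 4 * 4 = 256 elements, so the autotuned XBLOCK
# may exceed xnumel.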
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (4, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mul, input_1], Original ATen: [aten.convolution, aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sin_0.run(buf1, primals_2, buf2, 4096, grid=grid(4096), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, mul_1, input_2], Original ATen: [aten.convolution, aten.mul, aten.sin]
triton_poi_fused_convolution_mul_sin_1.run(buf4, primals_5, buf5, 2048, grid=grid(2048), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6; del buf6 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, mul_2, input_3], Original ATen: [aten.convolution, aten.mul, aten.sin]
triton_poi_fused_convolution_mul_sin_2.run(buf7, primals_7, buf8, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf4, buf5, buf7, )
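# buf8 is the network output; buf1/buf4/buf7 hold the saved pre-activations
# (see the note above triton_poi_fused_convolution_mul_sin_0) and buf2/buf5
# the intermediate sine activations fed into the next convolution.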
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def act(act_fun='LeakyReLU'):
"""
Either string defining an activation function or module (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
            assert False, f'unknown activation function: {act_fun}'
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
    Thin nn.Module wrapper around torch.tanh.
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
    Thin nn.Module wrapper around torch.sin (the SIREN 'sine' activation).
"""
def __init__(self):
super(Sin, self).__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layer(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super(SIREN_layer, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
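    # These bounds follow the SIREN initialization of Sitzmann et al. (2020):
    # U(-1/fan_in, 1/fan_in) for the first layer, and
    # U(-sqrt(6/fan_in)/omega_0, sqrt(6/fan_in)/omega_0) for deeper layers, so
    # pre-activations stay well distributed for the sine nonlinearity.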
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
class SIREN_CONV(nn.Module):
def __init__(self, ch_in, ch_out):
super(SIREN_CONV, self).__init__()
self.conv1 = SIREN_layer(ch_in, 64, frist=True)
self.conv2 = SIREN_layer(64, 32)
self.conv3 = SIREN_layer(32, ch_out)
self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3)
def forward(self, x):
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch_in': 4, 'ch_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_mul_sin_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_mul_sin_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sin_0[grid(4096)](buf1, primals_2,
buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
triton_poi_fused_convolution_mul_sin_1[grid(2048)](buf4, primals_5,
buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_mul_sin_2[grid(256)](buf7, primals_7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf4, buf5, buf7)
def act(act_fun='LeakyReLU'):
"""
Either string defining an activation function or module (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
    Tanh activation wrapped as an nn.Module (torch.tanh).
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
    Sine activation wrapped as an nn.Module (torch.sin), as used by the
    SIREN layers below (https://arxiv.org/abs/2006.09661).
"""
def __init__(self):
super(Sin, self).__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layer(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super(SIREN_layer, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
class SIREN_CONVNew(nn.Module):
def __init__(self, ch_in, ch_out):
super(SIREN_CONVNew, self).__init__()
self.conv1 = SIREN_layer(ch_in, 64, frist=True)
self.conv2 = SIREN_layer(64, 32)
self.conv3 = SIREN_layer(32, ch_out)
self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3)
def forward(self, input_0):
primals_1 = self.conv1.conv1.weight
primals_2 = self.conv1.conv1.bias
primals_4 = self.conv2.conv1.weight
primals_5 = self.conv2.conv1.bias
primals_6 = self.conv3.conv1.weight
primals_7 = self.conv3.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
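# Annotation (added for clarity): SIREN_CONVNew is the inductor-compiled twin
# of SIREN_CONV above. The three 1x1 convolutions are dispatched to
# extern_kernels.convolution, while each bias add, the omega_0 = 30 scaling,
# and the sine activation are fused into a single elementwise Triton kernel
# (tmp2 = conv + bias; tmp5 = sin(30.0 * tmp2)).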
| dustlrdk/noise2self | SIREN_CONV | false | 3,454 | [
"MIT"
] | 0 | 46e8c4650f7ec4f664448417fecd39b4cae477f7 | https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7 | import torch
import numpy as np
import torch.nn as nn
def act(act_fun='LeakyReLU'):
"""
Either string defining an activation function or module (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super().__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
    Tanh activation wrapped as an nn.Module (torch.tanh).
"""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
    Sine activation wrapped as an nn.Module (torch.sin), as used by the
    SIREN layers below (https://arxiv.org/abs/2006.09661).
"""
def __init__(self):
super().__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layer(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super().__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
class Model(nn.Module):
def __init__(self, ch_in, ch_out):
super().__init__()
self.conv1 = SIREN_layer(ch_in, 64, frist=True)
self.conv2 = SIREN_layer(64, 32)
self.conv3 = SIREN_layer(32, ch_out)
self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3)
def forward(self, x):
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GEGLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/em/cemg2y45l6deao7jdm2w6eokhhfga7r2ohhlsue6j5tme4ofmwni.py
# Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.gelu, aten.mul]
# Source node to ATen node mapping:
# g => add, erf, mul, mul_1, mul_2
# x => mul_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view_3), kwargs = {})
triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp9 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.gelu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_mul_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
    summary: Documented reusable implementation of the position-wise feed-forward network.
    # Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feed-forward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class GEGLU(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU(
), True, False, False, False)
def forward(self, x: 'torch.Tensor'):
return self.ffn(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
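# --- Annotation (not in the original repo): a minimal equivalence sketch.
# With all biases disabled and dropout inactive, GEGLU reduces to
# (gelu(x @ W1^T) * (x @ Wv^T)) @ W2^T. The helper name is hypothetical.
def _geglu_reference_check():
    torch.manual_seed(0)
    m = GEGLU(d_model=4, d_ff=4, dropout=0.0)
    m.eval()  # makes the Dropout module a no-op
    x = torch.rand(4, 4, 4, 4)
    ffn = m.ffn
    ref = (torch.nn.functional.gelu(x @ ffn.layer1.weight.t()) *
           (x @ ffn.linear_v.weight.t())) @ ffn.layer2.weight.t()
    assert torch.allclose(m(x), ref, atol=1e-6)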
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
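# Annotation (added for clarity): the kernel above fuses exact (erf-based)
# GELU with the gating product,
#     gelu(a) * b = 0.5 * a * (1 + erf(a / sqrt(2))) * b,
# where 0.7071067811865476 == 1 / sqrt(2). Fusing the gate with the multiply
# avoids materializing an intermediate gelu tensor in global memory.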
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
    summary: Documented reusable implementation of the position-wise feed-forward network.
    # Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feed-forward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class GEGLUNew(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU(
), True, False, False, False)
def forward(self, input_0):
primals_1 = self.ffn.layer1.weight
primals_3 = self.ffn.layer2.weight
primals_4 = self.ffn.linear_v.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| edchengmoore/pytorch_tabular | GEGLU | false | 3,455 | [
"MIT"
] | 0 | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
"""
title: Position-wise Feed-Forward Network (FFN)
    summary: Documented reusable implementation of the position-wise feed-forward network.
    # Position-wise Feed-Forward Network (FFN)
    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feed-forward network used in transformers.
    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer $d_{ff}$ is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
bias2: 'bool'=True, bias_gate: 'bool'=True):
"""
* `d_model` is the number of features in a token embedding
* `d_ff` is the number of features in the hidden layer of the FFN
* `dropout` is dropout probability for the hidden layer
* `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
"""
super().__init__()
self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
self.dropout = nn.Dropout(dropout)
self.activation = activation
self.is_gated = is_gated
if is_gated:
self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)
def forward(self, x: 'torch.Tensor'):
g = self.activation(self.layer1(x))
if self.is_gated:
x = g * self.linear_v(x)
else:
x = g
x = self.dropout(x)
return self.layer2(x)
class Model(nn.Module):
def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
super().__init__()
self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU(
), True, False, False, False)
def forward(self, x: 'torch.Tensor'):
return self.ffn(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ExtremeLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/j2/cj2nf7m36iodxg6bagn7ostrasaexmiy7b3qvqy2fxyprxsalfwt.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.full]
# Source node to ATen node mapping:
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float64, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_full_0 = async_compile.triton('triton_poi_fused_full_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_full_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_full_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 0.0, tl.float64)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/el/celea7ezymt2wd7zgg6hdmpdspmn3coo7pthct7mxpnf5xoq5ix4.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.full]
# Source node to ATen node mapping:
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1e-15), kwargs = {dtype: torch.float64, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_full_1 = async_compile.triton('triton_poi_fused_full_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_full_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_full_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 1e-15, tl.float64)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp0, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((), (), torch.float64)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.full]
stream0 = get_raw_stream(0)
triton_poi_fused_full_0.run(buf1, 1, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((), (), torch.float64)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.full]
triton_poi_fused_full_1.run(buf2, 1, grid=grid(1), stream=stream0)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.full, aten.linalg_pinv]
buf3 = torch.ops.aten.linalg_pinv.atol_rtol_tensor(primals_3, atol=buf1, rtol=buf2)
del buf1
del buf2
del primals_3
buf4 = buf3
del buf3
return (buf0, primals_1, buf0, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch import autograd
from torch.nn import init
class ExtremeLinearFunction(autograd.Function):
@staticmethod
def forward(ctx, input, forward_weight, feedback_weight):
ctx.save_for_backward(input, forward_weight, feedback_weight)
output = input.mm(forward_weight.t())
ctx.output = output
return output
@staticmethod
def backward(ctx, grad_output):
input, forward_weight, _feedback_weight = ctx.saved_tensors
grad_input = grad_weight = None
if ctx.needs_input_grad[1]:
inv_x = torch.pinverse(input)
error_weight = inv_x.mm(ctx.output - grad_output)
grad_weight = forward_weight - error_weight.t()
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(forward_weight)
return grad_input, grad_weight, None
class ExtremeLinear(nn.Module):
def __init__(self, input_features, output_features):
super(ExtremeLinear, self).__init__()
self.input_features = input_features
self.output_features = output_features
self.forward_weight = nn.Parameter(torch.Tensor(output_features,
input_features))
self.feedback_weight = nn.Parameter(torch.Tensor(output_features,
input_features))
self.reset_parameters()
def reset_parameters(self) ->None:
init.kaiming_uniform_(self.forward_weight, a=math.sqrt(5))
init.kaiming_uniform_(self.feedback_weight, a=math.sqrt(5))
def forward(self, input):
return ExtremeLinearFunction.apply(input, self.forward_weight, self
.feedback_weight)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_features': 4, 'output_features': 4}]
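# --- Annotation (not in the original repo): ExtremeLinearFunction.backward
# does not return the analytic weight gradient; it returns
# W - (pinv(X) @ (X W^T - dL/dY))^T, so one SGD step with lr=1 replaces W by
# the least-squares solution that maps X onto (Y - dL/dY), in the spirit of
# extreme learning machines. A small sketch (hypothetical helper name,
# well-conditioned X assumed):
def _extreme_step_demo():
    torch.manual_seed(0)
    layer = ExtremeLinear(4, 4)
    x = torch.eye(4) + 0.01 * torch.randn(4, 4)  # keeps pinv(x) well behaved
    target = torch.rand(4, 4)
    opt = torch.optim.SGD(layer.parameters(), lr=1.0)
    loss = 0.5 * ((layer(x) - target) ** 2).sum()  # dL/dY = Y - target
    opt.zero_grad()
    loss.backward()
    opt.step()
    # with lr=1, the new W solves X @ W.T ~ target, so the layer interpolates
    assert torch.allclose(layer(x), target, atol=1e-4)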
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
from torch import autograd
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_full_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 0.0, tl.float64)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None)
@triton.jit
def triton_poi_fused_full_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 1e-15, tl.float64)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((), (), torch.float64)
get_raw_stream(0)
triton_poi_fused_full_0[grid(1)](buf1, 1, XBLOCK=1, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float64)
triton_poi_fused_full_1[grid(1)](buf2, 1, XBLOCK=1, num_warps=1,
num_stages=1)
buf3 = torch.ops.aten.linalg_pinv.atol_rtol_tensor(primals_3, atol=
buf1, rtol=buf2)
del buf1
del buf2
del primals_3
buf4 = buf3
del buf3
return buf0, primals_1, buf0, buf4
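# Annotation (added for clarity): besides the forward mm (buf0), the compiled
# graph eagerly evaluates linalg_pinv(input) with atol=0.0 and rtol=1e-15
# (the two scalar buffers filled by the Triton kernels above) and returns the
# pseudo-inverse (buf4) alongside the tensors saved for the backward pass.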
class ExtremeLinearFunction(autograd.Function):
@staticmethod
def forward(ctx, input, forward_weight, feedback_weight):
ctx.save_for_backward(input, forward_weight, feedback_weight)
output = input.mm(forward_weight.t())
ctx.output = output
return output
@staticmethod
def backward(ctx, grad_output):
input, forward_weight, _feedback_weight = ctx.saved_tensors
grad_input = grad_weight = None
if ctx.needs_input_grad[1]:
inv_x = torch.pinverse(input)
error_weight = inv_x.mm(ctx.output - grad_output)
grad_weight = forward_weight - error_weight.t()
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(forward_weight)
return grad_input, grad_weight, None
class ExtremeLinearNew(nn.Module):
def __init__(self, input_features, output_features):
super(ExtremeLinearNew, self).__init__()
self.input_features = input_features
self.output_features = output_features
self.forward_weight = nn.Parameter(torch.Tensor(output_features,
input_features))
self.feedback_weight = nn.Parameter(torch.Tensor(output_features,
input_features))
self.reset_parameters()
def reset_parameters(self) ->None:
init.kaiming_uniform_(self.forward_weight, a=math.sqrt(5))
init.kaiming_uniform_(self.feedback_weight, a=math.sqrt(5))
def forward(self, input_0):
primals_1 = self.forward_weight
primals_2 = self.feedback_weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| crazyleg/lateral_research | ExtremeLinear | false | 3,456 | [
"MIT"
] | 0 | e186d218cd4b3ac3770e9fa375bc57133e4dafe5 | https://github.com/crazyleg/lateral_research/tree/e186d218cd4b3ac3770e9fa375bc57133e4dafe5 | import math
import torch
from torch import nn
from torch import autograd
from torch.nn import init
class ExtremeLinearFunction(autograd.Function):
@staticmethod
def forward(ctx, input, forward_weight, feedback_weight):
ctx.save_for_backward(input, forward_weight, feedback_weight)
output = input.mm(forward_weight.t())
ctx.output = output
return output
@staticmethod
def backward(ctx, grad_output):
input, forward_weight, _feedback_weight = ctx.saved_tensors
grad_input = grad_weight = None
if ctx.needs_input_grad[1]:
inv_x = torch.pinverse(input)
error_weight = inv_x.mm(ctx.output - grad_output)
grad_weight = forward_weight - error_weight.t()
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(forward_weight)
return grad_input, grad_weight, None
class Model(nn.Module):
def __init__(self, input_features, output_features):
super().__init__()
self.input_features = input_features
self.output_features = output_features
self.forward_weight = nn.Parameter(torch.Tensor(output_features,
input_features))
self.feedback_weight = nn.Parameter(torch.Tensor(output_features,
input_features))
self.reset_parameters()
def reset_parameters(self) ->None:
init.kaiming_uniform_(self.forward_weight, a=math.sqrt(5))
init.kaiming_uniform_(self.feedback_weight, a=math.sqrt(5))
def forward(self, input):
return ExtremeLinearFunction.apply(input, self.forward_weight, self
.feedback_weight)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
ClsHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_3 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_3 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ClsHead(nn.Module):
"""
    Classification head: global average pooling followed by a linear
    classifier; softmax probabilities are returned at inference time.
    Args:
        in_channels (int): number of input feature-map channels
        class_dim (int): number of output classes
"""
def __init__(self, in_channels, class_dim, **kwargs):
super(ClsHead, self).__init__()
self.training = False
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, class_dim, bias=True)
def forward(self, x):
x = self.pool(x)
x = torch.reshape(x, shape=[x.shape[0], x.shape[1]])
x = self.fc(x)
if not self.training:
x = F.softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'class_dim': 4}]
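# --- Annotation (not in the original repo): a short usage sketch with a
# hypothetical helper name. ClsHead average-pools the feature map to (N, C),
# applies the linear classifier and, because __init__ pins self.training to
# False, returns softmax probabilities over the class dimension.
def _clshead_demo():
    torch.manual_seed(0)
    head = ClsHead(in_channels=4, class_dim=4)
    probs = head(torch.rand(4, 4, 4, 4))
    assert probs.shape == (4, 4)
    assert torch.allclose(probs.sum(dim=1), torch.ones(4), atol=1e-6)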
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
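# Annotation (added for clarity): the two kernels above are the standard
# numerically stable softmax split. Kernel 1 computes exp(x - max(x)) per row
# so no exponent is positive (avoiding float32 overflow); kernel 2 normalizes
# by the row sum. This relies on softmax(x) = exp(x - m) / sum(exp(x - m))
# holding for any constant m, with m = max(x) the safe choice.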
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
return buf4, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf4
class ClsHeadNew(nn.Module):
"""
    Classification head: global average pooling followed by a linear
    classifier; softmax probabilities are returned at inference time.
    Args:
        in_channels (int): number of input feature-map channels
        class_dim (int): number of output classes
"""
def __init__(self, in_channels, class_dim, **kwargs):
super(ClsHeadNew, self).__init__()
self.training = False
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, class_dim, bias=True)
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| eminem171333491/PaddleOCR2Pytorch | ClsHead | false | 3,457 | [
"Apache-2.0"
] | 0 | ec466bb3a689eccb9290e9f80812a45301d3b030 | https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Classification head: global average pooling followed by a linear
    classifier; softmax probabilities are returned at inference time.
    Args:
        in_channels (int): number of input feature-map channels
        class_dim (int): number of output classes
"""
def __init__(self, in_channels, class_dim, **kwargs):
super().__init__()
self.training = False
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, class_dim, bias=True)
def forward(self, x):
x = self.pool(x)
x = torch.reshape(x, shape=[x.shape[0], x.shape[1]])
x = self.fc(x)
if not self.training:
x = F.softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CTCHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ai/caiy5z5nrpk27bcwdhmejk63orle3ulxiwsuoewsfbsdh5jaa66y.py
# Topologically Sorted Source Nodes: [predicts_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# predicts_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 424000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6625
x3 = (xindex // 6625)
x2 = (xindex // 26500)
tmp0 = tl.load(in_ptr0 + (x0 + (6656*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (6656 + x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (13312 + x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (19968 + x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x0 + (6656*x3)), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4s/c4syvx6g4zy3bk4mn2ylv7tfly6d6e26bto63qn4lzuqsjyk7gyh.py
# Topologically Sorted Source Nodes: [predicts_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# predicts_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 424000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6625
x3 = (xindex // 6625)
x2 = (xindex // 26500)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (6656*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (6656 + x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (13312 + x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (19968 + x0 + (26624*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
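# --- Hedged note + sketch (added commentary; not Inductor output) ---
# The two kernels above split F.softmax(predicts, dim=2) into the standard
# numerically stable pair: pass 0 computes exp(x - max) over the 4-element
# dim-2 axis, pass 1 divides by that axis's sum. The intermediate stride
# 6656 is 6625 rounded up to a multiple of 32, presumably padding each row
# for aligned memory access. Eager-mode equivalent:
import torch

def stable_softmax(x: torch.Tensor, dim: int = 2) -> torch.Tensor:
    shifted = x - x.amax(dim=dim, keepdim=True)  # pass 0: subtract max
    e = shifted.exp()                            # pass 0: exponentiate
    return e / e.sum(dim=dim, keepdim=True)      # pass 1: normalize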
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6625, 4), (4, 1))
assert_size_stride(primals_2, (6625, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6625), (6656, 1), torch.float32)
# Topologically Sorted Source Nodes: [predicts], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6625), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 6625), (106496, 26624, 6656, 1), torch.float32)
# Topologically Sorted Source Nodes: [predicts_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 424000, grid=grid(424000), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 6625), (106000, 26500, 6625, 1), torch.float32)
# Topologically Sorted Source Nodes: [predicts_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 424000, grid=grid(424000), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6625, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6625, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CTCHead(nn.Module):
def __init__(self, in_channels, out_channels=6625, fc_decay=0.0004,
mid_channels=None, **kwargs):
super(CTCHead, self).__init__()
if mid_channels is None:
self.fc = nn.Linear(in_channels, out_channels, bias=True)
else:
self.fc1 = nn.Linear(in_channels, mid_channels, bias=True)
self.fc2 = nn.Linear(mid_channels, out_channels, bias=True)
self.out_channels = out_channels
self.mid_channels = mid_channels
def forward(self, x, labels=None):
if self.mid_channels is None:
predicts = self.fc(x)
else:
predicts = self.fc1(x)
predicts = self.fc2(predicts)
if not self.training:
predicts = F.softmax(predicts, dim=2)
return predicts
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 424000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6625
x3 = xindex // 6625
x2 = xindex // 26500
tmp0 = tl.load(in_ptr0 + (x0 + 6656 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 26624 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (6656 + x0 + 26624 * x2), xmask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (13312 + x0 + 26624 * x2), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (19968 + x0 + 26624 * x2), xmask,
eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x0 + 6656 * x3), tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 424000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6625
x3 = xindex // 6625
x2 = xindex // 26500
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6656 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 26624 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (6656 + x0 + 26624 * x2), xmask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (13312 + x0 + 26624 * x2), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (19968 + x0 + 26624 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x4, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6625, 4), (4, 1))
assert_size_stride(primals_2, (6625,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6625), (6656, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6625), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 6625), (106496, 26624, 6656, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(424000)](buf0, buf1, 424000,
XBLOCK=512, num_warps=8, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 6625), (106000, 26500, 6625, 1),
torch.float32)
triton_poi_fused__softmax_1[grid(424000)](buf1, buf2, 424000,
XBLOCK=512, num_warps=8, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class CTCHeadNew(nn.Module):
def __init__(self, in_channels, out_channels=6625, fc_decay=0.0004,
mid_channels=None, **kwargs):
super(CTCHeadNew, self).__init__()
if mid_channels is None:
self.fc = nn.Linear(in_channels, out_channels, bias=True)
else:
self.fc1 = nn.Linear(in_channels, mid_channels, bias=True)
self.fc2 = nn.Linear(mid_channels, out_channels, bias=True)
self.out_channels = out_channels
self.mid_channels = mid_channels
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
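# --- Hedged smoke test (illustrative; requires a CUDA device) ---
# Parity check of the compiled head against the eager path it mirrors,
# fc followed by softmax over dim=2 (see CTCHead.forward in this entry).
def _check_ctc_head():
    import torch.nn.functional as F
    head = CTCHeadNew(in_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = F.softmax(head.fc(x), dim=2)
    assert torch.allclose(head(x), ref, atol=1e-5)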
| eminem171333491/PaddleOCR2Pytorch | CTCHead | false | 3,458 | [
"Apache-2.0"
] | 0 | ec466bb3a689eccb9290e9f80812a45301d3b030 | https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels, out_channels=6625, fc_decay=0.0004,
mid_channels=None, **kwargs):
super().__init__()
if mid_channels is None:
self.fc = nn.Linear(in_channels, out_channels, bias=True)
else:
self.fc1 = nn.Linear(in_channels, mid_channels, bias=True)
self.fc2 = nn.Linear(mid_channels, out_channels, bias=True)
self.out_channels = out_channels
self.mid_channels = mid_channels
def forward(self, x, labels=None):
if self.mid_channels is None:
predicts = self.fc(x)
else:
predicts = self.fc1(x)
predicts = self.fc2(predicts)
if not self.training:
predicts = F.softmax(predicts, dim=2)
return predicts
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bt/cbtukfelygvwruykgqxvoito6trim7hi4gocn4mpohjjmw63qewu.py
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# product_1 => mul
# product_2 => add
# weights => amax, exp, sub, sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.5), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_0 = async_compile.triton('triton_poi_fused__softmax_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp19, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zz/czzjxm5452x7hgytt4wj5g5gsmoluemkcheo5nyv5rfsehlnlt6g.py
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# product_1 => mul
# product_2 => add
# weights => amax, div, exp, sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.5), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_1 = async_compile.triton('triton_poi_fused__softmax_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 16
x5 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
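# --- Hedged note (added commentary) ---
# The constant 0.5 hard-coded in both kernels is d_model ** -0.5 specialized
# for d_model = 4 (see get_init_inputs), so the scale, the attn_bias add, and
# the numerically stable softmax are fused: pass 0 produces the row max and
# the row sum of exponentials, pass 1 normalizes the scores in place.
assert 4 ** -0.5 == 0.5  # the specialization baked into the kernels above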
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_add_mul_0.run(buf3, primals_7, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_1.run(buf6, primals_7, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf4
del buf5
del primals_7
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf8)
return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), primals_8, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=
d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self
.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape
[2] * out.shape[3]])
out = self.proj_fc(out)
return out
def get_inputs():
return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4]), torch.rand(
[4, 4, 1, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_key': 4, 'd_value': 4, 'd_model': 4}]
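# --- Hedged math note (added commentary) ---
# forward() computes scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q @ K^T * d_model**-0.5 + attn_bias) @ V,
# with per-head reshapes handled by _prepare_qkv. A minimal functional
# sketch (names and the optional-scale signature are illustrative):
import torch

def sdpa(q, k, v, bias=None, scale=None):
    scale = q.size(-1) ** -0.5 if scale is None else scale
    scores = torch.matmul(q, k.transpose(-2, -1)) * scale
    if bias is not None:
        scores = scores + bias
    return torch.matmul(torch.softmax(scores, dim=-1), v)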
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_add_mul_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 16
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_mul_0[grid(16)](buf3, primals_7, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_add_mul_1[grid(64)](buf6, primals_7, buf4,
buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf4
del buf5
del primals_7
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf8)
return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0
), primals_8, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
class MultiHeadAttentionNew(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttentionNew, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=
d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self
.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.q_fc.weight
primals_5 = self.k_fc.weight
primals_6 = self.v_fc.weight
primals_7 = self.proj_fc.weight
primals_1 = input_0
primals_2 = input_1
primals_4 = input_2
primals_8 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
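# --- Hedged note (added commentary) ---
# Unlike the eager module, this compiled wrapper's forward() takes exactly the
# four traced tensors (queries, keys, values, attn_bias) and never reaches the
# cache branches in _prepare_qkv; incremental-decoding caches would need the
# eager forward. Shapes mirror get_inputs(): q/k/v (4, 4, 1, 4), bias (4, 4),
# output (4, 4, 4).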
| eminem171333491/PaddleOCR2Pytorch | MultiHeadAttention | false | 3,459 | [
"Apache-2.0"
] | 0 | ec466bb3a689eccb9290e9f80812a45301d3b030 | https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super().__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=
d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self
.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape
[2] * out.shape[3]])
out = self.proj_fc(out)
return out
def get_inputs():
return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4]), torch.rand(
[4, 4, 1, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
AddNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gi/cgi45knaonkz7f2ymhhwdszxotrkx3bl6ehavmfo5zp3bzzio3zk.py
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# layer_norm => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_0 = async_compile.triton('triton_poi_fused_add_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nh/cnhbjzi35eshrjhk7tjj644pcpxhgyi7ajxmdjzgjtdqbnbek7qt.py
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
''', device_str='cuda')
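# --- Hedged reference sketch (added; not Inductor output) ---
# Kernel 0 computes the per-row mean and biased variance (correction=0) of
# X + Y over the last dim of size 4; kernel 1 applies the LayerNorm affine
# transform (x - mean) * rsqrt(var + 1e-05) * weight + bias. Eager form:
import torch

def add_norm_reference(x, y, weight, bias, eps=1e-05):
    s = x + y
    mean = s.mean(dim=-1, keepdim=True)
    var = s.var(dim=-1, unbiased=False, keepdim=True)  # biased, correction=0
    return (s - mean) * torch.rsqrt(var + eps) * weight + bias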
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_add_native_layer_norm_0.run(primals_1, primals_2, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_1.run(primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
del primals_3
del primals_4
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AddNorm(nn.Module):
"""
    Applies dropout to Y, adds the result to X, then applies LayerNorm: the standard Add & Norm operation in Transformers.
"""
def __init__(self, input_dim: 'int', dropout: 'float'):
super(AddNorm, self).__init__()
self.dropout = nn.Dropout(dropout)
self.ln = nn.LayerNorm(input_dim)
def forward(self, X: 'torch.Tensor', Y: 'torch.Tensor') ->torch.Tensor:
return self.ln(self.dropout(Y) + X)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'dropout': 0.5}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1,
primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1,
primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
del primals_3
del primals_4
return buf3, buf2
class AddNormNew(nn.Module):
"""
    Applies dropout to Y, adds the result to X, then applies LayerNorm: the standard Add & Norm operation in Transformers.
"""
def __init__(self, input_dim: 'int', dropout: 'float'):
super(AddNormNew, self).__init__()
self.dropout = nn.Dropout(dropout)
self.ln = nn.LayerNorm(input_dim)
def forward(self, input_0, input_1):
primals_3 = self.ln.weight
primals_4 = self.ln.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
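# --- Hedged smoke test (illustrative; requires a CUDA device) ---
# In eval mode dropout is the identity, so the compiled module should match
# ln(dropout(Y) + X) from the eager definition.
def _check_add_norm():
    m = AddNormNew(input_dim=4, dropout=0.5).cuda().eval()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m.ln(m.dropout(y) + x)
    assert torch.allclose(m(x, y), ref, atol=1e-5)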
| edchengmoore/pytorch_tabular | AddNorm | false | 3,460 | [
"MIT"
] | 0 | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Applies dropout to Y, adds the result to X, then applies LayerNorm: the standard Add & Norm operation in Transformers.
"""
def __init__(self, input_dim: 'int', dropout: 'float'):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.ln = nn.LayerNorm(input_dim)
def forward(self, X: 'torch.Tensor', Y: 'torch.Tensor') ->torch.Tensor:
return self.ln(self.dropout(Y) + X)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 0.5]
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/la/clalnn5iz2syotwgvds5fjb6mtcklh5yizks6zdxu552jin7zbwe.py
# Topologically Sorted Source Nodes: [residual, residual_1], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# residual => convolution
# residual_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
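# Annotation: the kernel above fuses the conv bias add with PReLU. Its
# tl.where(tmp4, tmp2, tmp7) branch is the PReLU formula with a single
# shared weight `a`: in eager terms, h = conv_out + bias.view(1, -1, 1, 1)
# followed by out = torch.where(h > 0, h, a * h).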
# kernel path: runs/run_shard_7/inductor_cache/gz/cgz3i5sfiy2r2zcw7on5c3vgziypvqy3tqtrdk4orqs3phgqxlob.py
# Topologically Sorted Source Nodes: [residual_2, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add
# residual_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual, residual_1], Original ATen: [aten.convolution, aten._prelu_kernel]
stream0 = get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [residual_2, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf4, primals_3, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf4, primals_1, primals_3, primals_4, primals_5, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, x):
residual = self.conv1(x)
residual = self.prelu(residual)
residual = self.conv2(residual)
return x + residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1,
primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_1[grid(256)](buf4, primals_3,
primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf4, primals_1, primals_3, primals_4, primals_5, buf1, buf2
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.prelu.weight
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| elaina03/Single-Image-Dehazing | ResidualBlock | false | 3,463 | [
"MIT"
] | 0 | a6a29cb5591204f8066729df4053db0ea2b54aff | https://github.com/elaina03/Single-Image-Dehazing/tree/a6a29cb5591204f8066729df4053db0ea2b54aff | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, x):
residual = self.conv1(x)
residual = self.prelu(residual)
residual = self.conv2(residual)
return x + residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
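Aside (illustrative, not part of the dataset row above): the compiled graph is conv, PReLU, conv, skip add, with the bias adds fused into the two pointwise kernels. A minimal eager sketch of the same arithmetic, assuming a CUDA device:
import torch
import torch.nn as nn
conv1 = nn.Conv2d(4, 4, kernel_size=3, padding=1).cuda()
prelu = nn.PReLU().cuda()
conv2 = nn.Conv2d(4, 4, kernel_size=3, padding=1).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
c = conv1(x)
h = prelu(c)  # first fused kernel: bias add + PReLU
assert torch.allclose(h, torch.where(c > 0, c, prelu.weight * c))
out = x + conv2(h)  # second fused kernel: bias add + skip add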
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
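# Annotation: in the cat kernel above, x0 = i % 8 picks the output column
# and x1 = i // 8 the row; columns 0-3 come from the masked load of
# in_ptr0 (x) and columns 4-7 from in_ptr1 (c), reproducing
# torch.cat([x, c], 1): out[r, k] = x[r, k] if k < 4 else c[r, k - 4].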
# kernel path: runs/run_shard_7/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py
# Topologically Sorted Source Nodes: [p_x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# p_x => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/b6/cb6p4coebyc36mfgscp5avnf6elbckuaiicncljn73yyl7dip5fo.py
# Topologically Sorted Source Nodes: [p_x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# p_x_2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
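# Annotation: besides the ReLU output, the kernel above also stores the
# boolean mask (out <= 0) that aten.threshold_backward consumes, so the
# backward pass can zero gradients at clipped positions without
# recomputing the activation; in eager terms, out = relu(h), mask = out <= 0.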
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [p_x], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [p_x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_6, buf5, 16, grid=grid(16), stream=stream0)
del primals_6
return (buf4, buf0, buf2, buf5, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, sample_size, condition_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size, hidden_size)
def forward(self, x, c):
x = torch.cat([x, c], 1)
p_x = F.relu(self.fc1(x))
p_x = self.fc2(p_x)
p_x = F.relu(self.fc3(p_x))
return p_x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'sample_size': 4, 'condition_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16)](buf4,
primals_6, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_6
return buf4, buf0, buf2, buf5, primals_5
class EncoderNew(nn.Module):
def __init__(self, sample_size, condition_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size, hidden_size)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc3.weight
primals_6 = self.fc3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| ekrell/learn-planning-space | Encoder | false | 3,464 | [
"MIT"
] | 0 | 730e448bffa4996b2b1ef3a5b00500dc172962ec | https://github.com/ekrell/learn-planning-space/tree/730e448bffa4996b2b1ef3a5b00500dc172962ec | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, sample_size, condition_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size, hidden_size)
def forward(self, x, c):
x = torch.cat([x, c], 1)
p_x = F.relu(self.fc1(x))
p_x = self.fc2(p_x)
p_x = F.relu(self.fc3(p_x))
return p_x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
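Aside (illustrative, not part of the dataset row above): call() contains no dropout kernel; the graph appears to have been traced with nn.Dropout inactive (eval-mode behavior), leaving cat, linear, relu, linear, relu. A minimal eager sketch under that assumption, on a CUDA device:
import torch
import torch.nn as nn
import torch.nn.functional as F
fc1 = nn.Linear(8, 4).cuda()
fc3 = nn.Linear(4, 4).cuda()
x = torch.rand(4, 4, device='cuda')
c = torch.rand(4, 4, device='cuda')
out = F.relu(fc3(F.relu(fc1(torch.cat([x, c], 1)))))
print(out.shape)  # torch.Size([4, 4])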
LatentZ | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wv/cwvq4to2op26b5663yzk3drrk4ugopokyimkbel66rq2azxpcsur.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, add], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# mul_1 => mul_1
# std => exp
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %randn), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %view_1), kwargs = {})
triton_poi_fused_add_exp_mul_0 = async_compile.triton('triton_poi_fused_add_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
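# Annotation: the kernel above is the elementwise half of the
# reparameterization trick, z = exp(0.5 * logvar) * eps + mu (tmp3 is the
# std, tmp5 = std * eps, tmp7 adds mu); the eps = randn_like draw itself
# happens outside the kernel, via the aten.randn call in call() below.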
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf2 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, std, mul_1, add], Original ATen: [aten.mul, aten.exp, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_mul_0.run(buf1, buf3, buf0, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LatentZ(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.mu = nn.Linear(hidden_size, latent_size)
self.logvar = nn.Linear(hidden_size, latent_size)
def forward(self, p_x):
mu = self.mu(p_x)
logvar = self.logvar(p_x)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return std * eps + mu, logvar, mu
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'latent_size': 4}]
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_mul_0[grid(256)](buf1, buf3, buf0, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3
class LatentZNew(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.mu = nn.Linear(hidden_size, latent_size)
self.logvar = nn.Linear(hidden_size, latent_size)
def forward(self, input_0):
primals_1 = self.mu.weight
primals_2 = self.mu.bias
primals_4 = self.logvar.weight
primals_5 = self.logvar.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| ekrell/learn-planning-space | LatentZ | false | 3,467 | [
"MIT"
] | 0 | 730e448bffa4996b2b1ef3a5b00500dc172962ec | https://github.com/ekrell/learn-planning-space/tree/730e448bffa4996b2b1ef3a5b00500dc172962ec | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.mu = nn.Linear(hidden_size, latent_size)
self.logvar = nn.Linear(hidden_size, latent_size)
def forward(self, p_x):
mu = self.mu(p_x)
logvar = self.logvar(p_x)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return std * eps + mu, logvar, mu
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
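Aside (illustrative, not part of the dataset row above): the row implements the reparameterization trick, which keeps sampling differentiable because gradients flow to mu and logvar while eps is a constant draw. A minimal sketch, assuming a CUDA device:
import torch
mu = torch.zeros(4, 4, device='cuda', requires_grad=True)
logvar = torch.zeros(4, 4, device='cuda', requires_grad=True)
eps = torch.randn(4, 4, device='cuda')  # no grad: a fixed noise sample
z = torch.exp(0.5 * logvar) * eps + mu
z.sum().backward()
assert mu.grad is not None and logvar.grad is not None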
UpConv2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/mx/cmxtwcwrggdrnqr5kbe5mb6upgf3zd63wrsqszl5hsgucc2chk55.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.pixel_shuffle]
# Source node to ATen node mapping:
# output => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_pixel_shuffle_0 = async_compile.triton('triton_poi_fused_pixel_shuffle_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 2], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pixel_shuffle_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 2
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x5 = xindex
y0 = yindex % 64
y1 = (yindex // 64) % 2
y2 = (yindex // 128) % 64
y6 = (yindex // 8192)
y3 = (yindex // 8192) % 3
y7 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (64*y2) + (4096*x5) + (8192*y1) + (16384*y6)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + (2*y1) + (4*y3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x5 + (2*y7)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256, 128, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.pixel_shuffle]
stream0 = get_raw_stream(0)
triton_poi_fused_pixel_shuffle_0.run(buf0, primals_2, buf1, 98304, 2, grid=grid(98304, 2), stream=stream0)
del buf0
del primals_2
return (reinterpret_tensor(buf1, (4, 3, 128, 128), (49152, 16384, 128, 1), 0), primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class UpConv2D(nn.Module):
def __init__(self, in_channels=3, out_channels=3, kernel_size=5, ratio=2):
super(UpConv2D, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels * ratio ** 2,
kernel_size, padding=kernel_size // 2)
self.upscale = nn.PixelShuffle(ratio)
def forward(self, input_):
x = self.conv(input_)
output = self.upscale(x)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pixel_shuffle_0(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 2
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x5 = xindex
y0 = yindex % 64
y1 = yindex // 64 % 2
y2 = yindex // 128 % 64
y6 = yindex // 8192
y3 = yindex // 8192 % 3
y7 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 64 * y2 + 4096 * x5 + 8192 * y1 + 16384 *
y6), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 2 * y1 + 4 * y3), xmask, eviction_policy
='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x5 + 2 * y7), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256,
128, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pixel_shuffle_0[grid(98304, 2)](buf0, primals_2,
buf1, 98304, 2, XBLOCK=2, YBLOCK=512, num_warps=4, num_stages=1)
del buf0
del primals_2
return reinterpret_tensor(buf1, (4, 3, 128, 128), (49152, 16384, 128, 1), 0
), primals_1, primals_3
class UpConv2DNew(nn.Module):
def __init__(self, in_channels=3, out_channels=3, kernel_size=5, ratio=2):
super(UpConv2DNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels * ratio ** 2,
kernel_size, padding=kernel_size // 2)
self.upscale = nn.PixelShuffle(ratio)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| emirkonuk/defocus | UpConv2D | false | 3,468 | [
"Apache-2.0"
] | 0 | da2977d2698eb20e9ab2a3bcd1fa4d05e1dd9b50 | https://github.com/emirkonuk/defocus/tree/da2977d2698eb20e9ab2a3bcd1fa4d05e1dd9b50 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels=3, out_channels=3, kernel_size=5, ratio=2):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels * ratio ** 2,
kernel_size, padding=kernel_size // 2)
self.upscale = nn.PixelShuffle(ratio)
def forward(self, input_):
x = self.conv(input_)
output = self.upscale(x)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
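Aside (illustrative, not part of the dataset row above): the (4, 3, 64, 2, 64, 2) intermediate buffer is exactly PixelShuffle's permuted view, (N, C, r, r, H, W) reordered to (N, C, H, r, W, r) before the final reshape. A minimal equivalence sketch, assuming a CUDA device:
import torch
import torch.nn as nn
r = 2
x = torch.rand(4, 3 * r * r, 64, 64, device='cuda')
ref = nn.PixelShuffle(r)(x)
manual = (x.view(4, 3, r, r, 64, 64)
          .permute(0, 1, 4, 2, 5, 3)   # (N, C, H, r, W, r), as in buf1
          .reshape(4, 3, 64 * r, 64 * r))
assert torch.equal(ref, manual)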
PrototypicalNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hp/chptg55ofbvcnhmdfgvcjqku736b3xcyxeahxk2eb2jpz7f2wey2.py
# Topologically Sorted Source Nodes: [prototypes_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# prototypes_1 => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
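# Annotation: buf0 = bmm(labels^T, features) sums the support features per
# class, and the kernel above divides each prototype row by that class's
# support count (tmp1 + tmp2 + tmp4 + tmp6 sums a one-hot label column),
# i.e. prototypes[b, k] = mean of the features assigned to class k.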
# kernel path: runs/run_shard_7/inductor_cache/sa/csatjxzzljvzrbsywuhl5u6duvaywsdqxfdcvhr7vxe6wtpp43gn.py
# Topologically Sorted Source Nodes: [neg, add, mul_2, dist, dist_1, scores_cls], Original ATen: [aten.neg, aten.add, aten.mul, aten.sub, aten.div]
# Source node to ATen node mapping:
# add => add
# dist => sub
# dist_1 => div_1
# mul_2 => mul_2
# neg => neg
# scores_cls => mul_3
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%primals_4,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, %expand_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul_2), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %div_1), kwargs = {})
triton_poi_fused_add_div_mul_neg_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_neg_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_neg_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4*x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_out_ptr0 + (x3), xmask)
tmp29 = tl.load(in_ptr2 + (0))
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = 0.25
tmp28 = tmp26 * tmp27
tmp31 = -tmp30
tmp32 = tmp31 * tmp28
tl.store(in_out_ptr0 + (x3), tmp28, xmask)
tl.store(out_ptr0 + (x3), tmp32, xmask)
''', device_str='cuda')
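# Illustrative eager-mode sketch of what this fused kernel computes: squared
# L2 distance via ||a||^2 + ||b||^2 - 2*a.b, averaged over the nC=4 channels
# (the 0.25 factor), then negated and scaled. `A`, `B`, `scale` are
# hypothetical (nB, N, nC), (nB, M, nC) and scalar-tensor arguments.
def _eager_scaled_neg_dist(A, B, scale):
    AB = torch.bmm(A, B.transpose(1, 2))
    AA = (A * A).sum(dim=2, keepdim=True)
    BB = (B * B).sum(dim=2, keepdim=True).transpose(1, 2)
    dist = (AA + BB - 2 * AB) / A.size(2)
    return -scale * dist, dist  # the kernel stores both tensors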
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prototypes], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [prototypes_1], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [AB], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, add, mul_2, dist, dist_1, scores_cls], Original ATen: [aten.neg, aten.add, aten.mul, aten.sub, aten.div]
triton_poi_fused_add_div_mul_neg_sub_1.run(buf4, primals_3, buf1, primals_4, buf5, 64, grid=grid(64), stream=stream0)
del buf1
del primals_3
del primals_4
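    # buf5 holds scores_cls; buf4 holds the intermediate dist, which is also
    # returned (presumably saved for the backward pass)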
return (buf5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.optim
import torch.nn.parallel
def L2SquareDist(A, B, average=True):
assert A.dim() == 3
assert B.dim() == 3
assert A.size(0) == B.size(0) and A.size(2) == B.size(2)
nB = A.size(0)
Na = A.size(1)
Nb = B.size(1)
nC = A.size(2)
AB = torch.bmm(A, B.transpose(1, 2))
AA = (A * A).sum(dim=2, keepdim=True).view(nB, Na, 1)
BB = (B * B).sum(dim=2, keepdim=True).view(nB, 1, Nb)
dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB
if average:
dist = dist / nC
return dist
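# Quick sanity-check sketch (hypothetical shapes): with average=False the
# result should match torch.cdist squared up to floating-point error.
def _check_l2square_dist():
    a, b = torch.rand(2, 3, 5), torch.rand(2, 4, 5)
    assert torch.allclose(L2SquareDist(a, b, average=False),
        torch.cdist(a, b) ** 2, atol=1e-05)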
class PrototypicalNetwork(nn.Module):
def __init__(self, opt):
super(PrototypicalNetwork, self).__init__()
scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 1.0
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls),
requires_grad=True)
def forward(self, features_test, features_train, labels_train):
"""Recognize novel categories based on the Prototypical Nets approach.
        Classify the test examples (i.e., `features_test`) based on the
        available training examples (i.e., `features_train` and
        `labels_train`), following the Prototypical Nets approach.
Args:
features_test: A 3D tensor with shape
[batch_size x num_test_examples x num_channels] that represents
the test features of each training episode in the batch.
features_train: A 3D tensor with shape
[batch_size x num_train_examples x num_channels] that represents
the train features of each training episode in the batch.
labels_train: A 3D tensor with shape
[batch_size x num_train_examples x nKnovel] that represents
the train labels (encoded as 1-hot vectors) of each training
episode in the batch.
Return:
scores_cls: A 3D tensor with shape
[batch_size x num_test_examples x nKnovel] that represents the
classification scores of the test feature vectors for the
nKnovel novel categories.
"""
assert features_train.dim() == 3
assert labels_train.dim() == 3
assert features_test.dim() == 3
assert features_train.size(0) == labels_train.size(0)
assert features_train.size(0) == features_test.size(0)
assert features_train.size(1) == labels_train.size(1)
assert features_train.size(2) == features_test.size(2)
labels_train_transposed = labels_train.transpose(1, 2)
prototypes = torch.bmm(labels_train_transposed, features_train)
prototypes = prototypes.div(labels_train_transposed.sum(dim=2,
keepdim=True).expand_as(prototypes))
scores_cls = -self.scale_cls * L2SquareDist(features_test, prototypes)
return scores_cls
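# Illustrative usage (hypothetical 5-way episode: 10 train / 6 test examples,
# 64-d features). labels_train must be one-hot so that bmm() sums features
# per class and the column sums drive the prototype averaging division.
def _example_episode():
    net = PrototypicalNetwork({'scale_cls': 10.0})
    labels = nn.functional.one_hot(torch.arange(10).reshape(1, 10) % 5, 5
        ).float()
    return net(torch.rand(1, 6, 64), torch.rand(1, 10, 64), labels)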
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'opt': _mock_config(scale_cls=1.0)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x4), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_out_ptr0 + x3, xmask)
tmp29 = tl.load(in_ptr2 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = 0.25
tmp28 = tmp26 * tmp27
tmp31 = -tmp30
tmp32 = tmp31 * tmp28
tl.store(in_out_ptr0 + x3, tmp28, xmask)
tl.store(out_ptr0 + x3, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1,
4), 0), primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_div_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_3, reinterpret_tensor(buf1, (4, 4, 4), (
16, 1, 4), 0), out=buf2)
buf3 = buf2
del buf2
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_neg_sub_1[grid(64)](buf4, primals_3,
buf1, primals_4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del primals_3
del primals_4
return buf5, buf4
def L2SquareDist(A, B, average=True):
assert A.dim() == 3
assert B.dim() == 3
assert A.size(0) == B.size(0) and A.size(2) == B.size(2)
nB = A.size(0)
Na = A.size(1)
Nb = B.size(1)
nC = A.size(2)
AB = torch.bmm(A, B.transpose(1, 2))
AA = (A * A).sum(dim=2, keepdim=True).view(nB, Na, 1)
BB = (B * B).sum(dim=2, keepdim=True).view(nB, 1, Nb)
dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB
if average:
dist = dist / nC
return dist
class PrototypicalNetworkNew(nn.Module):
def __init__(self, opt):
super(PrototypicalNetworkNew, self).__init__()
scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 1.0
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls),
requires_grad=True)
def forward(self, input_0, input_1, input_2):
primals_4 = self.scale_cls
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Basasuya/FewShotWithoutForgetting | PrototypicalNetwork | false | 3,469 | [
"MIT"
] | 0 | eecc70e416ed82999124ddfca1b145f6dbcd74a6 | https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.optim
import torch.nn.parallel
def L2SquareDist(A, B, average=True):
assert A.dim() == 3
assert B.dim() == 3
assert A.size(0) == B.size(0) and A.size(2) == B.size(2)
nB = A.size(0)
Na = A.size(1)
Nb = B.size(1)
nC = A.size(2)
AB = torch.bmm(A, B.transpose(1, 2))
AA = (A * A).sum(dim=2, keepdim=True).view(nB, Na, 1)
BB = (B * B).sum(dim=2, keepdim=True).view(nB, 1, Nb)
dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB
if average:
dist = dist / nC
return dist
class Model(nn.Module):
def __init__(self, opt):
super().__init__()
scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 1.0
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls),
requires_grad=True)
def forward(self, features_test, features_train, labels_train):
"""Recognize novel categories based on the Prototypical Nets approach.
        Classify the test examples (i.e., `features_test`) based on the
        available training examples (i.e., `features_train` and
        `labels_train`), following the Prototypical Nets approach.
Args:
features_test: A 3D tensor with shape
[batch_size x num_test_examples x num_channels] that represents
the test features of each training episode in the batch.
features_train: A 3D tensor with shape
[batch_size x num_train_examples x num_channels] that represents
the train features of each training episode in the batch.
labels_train: A 3D tensor with shape
[batch_size x num_train_examples x nKnovel] that represents
the train labels (encoded as 1-hot vectors) of each training
episode in the batch.
Return:
scores_cls: A 3D tensor with shape
[batch_size x num_test_examples x nKnovel] that represents the
classification scores of the test feature vectors for the
nKnovel novel categories.
"""
assert features_train.dim() == 3
assert labels_train.dim() == 3
assert features_test.dim() == 3
assert features_train.size(0) == labels_train.size(0)
assert features_train.size(0) == features_test.size(0)
assert features_train.size(1) == labels_train.size(1)
assert features_train.size(2) == features_test.size(2)
labels_train_transposed = labels_train.transpose(1, 2)
prototypes = torch.bmm(labels_train_transposed, features_train)
prototypes = prototypes.div(labels_train_transposed.sum(dim=2,
keepdim=True).expand_as(prototypes))
scores_cls = -self.scale_cls * L2SquareDist(features_test, prototypes)
return scores_cls
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
    return [[], {'opt': _mock_config(scale_cls=1.0)}]
|
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
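# Illustrative eager-mode sketch of the per-row statistics this kernel
# produces: the mean and rsqrt(var + eps) over the last (size-4) dimension,
# which the second layer-norm kernel combines with the affine weight and
# bias. `x` is a hypothetical (..., 4) tensor.
def _eager_layernorm_stats(x, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + eps)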
# kernel path: runs/run_shard_7/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/n3/cn3h43fi4m7oq2vwlktxfhxi3dzck4gnc765fyme47rufsuxazkg.py
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# product => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# product => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
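# Illustrative eager-mode sketch of the layouts these clone kernels
# materialize for the batched matmuls: q and v as (batch, head, seq, d) via
# permute(0, 2, 1, 3), and k additionally transposed to (batch, head, d, seq)
# via permute(0, 2, 3, 1). `t` is a hypothetical (batch, seq, head, d) tensor.
def _eager_head_split(t, transpose_for_k=False):
    return (t.permute(0, 2, 3, 1) if transpose_for_k else
        t.permute(0, 2, 1, 3)).contiguous()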
# kernel path: runs/run_shard_7/inductor_cache/yw/cyw3ff4nmszw3dpfuipofodyezjcpjoru35h7fhkaosfnlrctm2g.py
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# product_1 => mul_2
# product_2 => add_2
# weights => amax, exp, sub_1, sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.5), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_4 = async_compile.triton('triton_poi_fused__softmax_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp19, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4j/c4js4wnrmajokobx5l4yjjeu36aktrpkep2mzo6qtwttwlqodwbm.py
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# product_1 => mul_2
# product_2 => add_2
# weights => amax, div, exp, sub_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.5), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_5 = async_compile.triton('triton_poi_fused__softmax_add_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
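# Illustrative eager-mode equivalent of the two softmax kernels above: scale
# the raw scores by d_model**-0.5 (0.5 for d_model=4), add the attention
# bias, then take a numerically stable softmax along the last dimension.
# `product` and `bias` are hypothetical broadcast-compatible tensors.
def _eager_biased_softmax(product, bias):
    scores = product * 0.5 + bias
    e = (scores - scores.amax(dim=-1, keepdim=True)).exp()
    return e / e.sum(dim=-1, keepdim=True)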
# kernel path: runs/run_shard_7/inductor_cache/zi/czic6s5idip57syewxigjtom43flziklldd4ea2qpsxjorxgbunq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_3
# Graph fragment:
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %primals_3), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u4/cu4mvhweewrefdurxuza5qfbqlwomkc67kmxkkaurh6luaf2e2fz.py
# Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
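# Illustrative eager-mode sketch: this kernel fuses the fc1 bias add and ReLU
# with precomputing the `<= 0` mask that ReLU's backward (threshold_backward)
# consumes. `h` and `bias` are hypothetical tensors.
def _eager_relu_with_mask(h, bias):
    out = torch.relu(h + bias)
    return out, out <= 0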
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf4, buf7, 64, 4, grid=grid(64, 4), stream=stream0)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf8, primals_7, buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_5.run(buf11, primals_7, buf9, buf10, 256, grid=grid(256), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten.native_dropout]
buf12 = torch.ops.aten.native_dropout.default(buf11, 0.5, True)
buf13 = buf12[0]
buf14 = buf12[1]
del buf12
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf5, buf15, 256, grid=grid(256), stream=stream0)
buf16 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
del buf16
buf18 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf18)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_dropout]
buf19 = torch.ops.aten.native_dropout.default(reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
buf20 = buf19[0]
buf21 = buf19[1]
del buf19
buf22 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf22, primals_3, 64, grid=grid(64), stream=stream0)
buf23 = buf1; del buf1 # reuse
buf24 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf22, buf23, buf24, 16, grid=grid(16), stream=stream0)
buf25 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf22, buf23, buf24, primals_9, primals_10, buf25, 64, grid=grid(64), stream=stream0)
del buf23
del buf24
del primals_10
buf26 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0); del buf26 # reuse
buf36 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_7.run(buf27, primals_12, buf36, 64, grid=grid(64), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [hidden_1, hidden_2], Original ATen: [aten.relu, aten.native_dropout]
buf28 = torch.ops.aten.native_dropout.default(buf27, 0.5, True)
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = reinterpret_tensor(buf27, (16, 4), (4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf31)
del primals_14
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_dropout]
buf32 = torch.ops.aten.native_dropout.default(reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
del buf31
buf33 = buf32[0]
buf34 = buf32[1]
del buf32
buf35 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf35, buf22, 64, grid=grid(64), stream=stream0)
return (buf35, primals_3, primals_9, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11, buf14, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), buf21, buf22, reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf30, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), buf34, primals_13, buf36, primals_11, primals_8, reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6, primals_5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Lambda(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = torch.nn.Linear(in_features=d_model, out_features=
d_inner_hid)
self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features=
d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=
d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
        q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head,
            self.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
        out = torch.reshape(out, shape=[out.size(0), out.size(1),
            out.shape[2] * out.shape[3]])
out = self.proj_fc(out)
return out
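# Illustrative usage (hypothetical shapes): self-attention over a batch of 2
# sequences of length 6 with d_model=8 split across 2 heads; passing None for
# keys/values makes the layer attend over the queries themselves.
def _example_self_attention():
    mha = MultiHeadAttention(d_key=4, d_value=4, d_model=8, n_head=2)
    x = torch.rand(2, 6, 8)
    return mha(x, None, None, attn_bias=None)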
class LambdaXY(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x, y):
return self.func(x, y)
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = nn.ModuleList()
cur_a_len = 0
cur_n_len = 0
cur_d_len = 0
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.add_module('add_res_connect_{}'.format(
cur_a_len), LambdaXY(lambda x, y: x + y if y is not
None else x))
cur_a_len += 1
elif cmd == 'n':
layerNorm = torch.nn.LayerNorm(normalized_shape=d_model,
elementwise_affine=True, eps=1e-05)
self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm
)
cur_n_len += 1
elif cmd == 'd':
self.functors.add_module('add_drop_{}'.format(cur_d_len),
Lambda(lambda x: F.dropout(x, p=dropout_rate) if
dropout_rate else x))
cur_d_len += 1
def forward(self, x, residual=None):
        for cmd, functor in zip(self.process_cmd, self.functors):
if cmd == 'a':
x = functor(x, residual)
else:
x = functor(x)
return x
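# Illustrative usage (hypothetical shapes): 'n' preprocessing applies only
# LayerNorm, while 'da' postprocessing applies dropout and then adds the
# residual, matching the preprocess_cmd/postprocess_cmd defaults below.
def _example_pre_post():
    pre = PrePostProcessLayer('n', d_model=8, dropout_rate=0.1)
    post = PrePostProcessLayer('da', d_model=8, dropout_rate=0.1)
    x = torch.rand(2, 6, 8)
    return post(pre(x), residual=x)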
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None,
None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
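# Illustrative usage (hypothetical shapes): one encoder layer over a batch of
# 2 sequences of length 6 with d_model=8; an all-zero attn_bias is a no-op
# attention mask.
def _example_encoder_layer():
    layer = EncoderLayer(n_head=2, d_key=4, d_value=4, d_model=8,
        d_inner_hid=16, prepostprocess_dropout=0.1, attention_dropout=0.1,
        relu_dropout=0.1)
    x = torch.rand(2, 6, 8)
    return layer(x, torch.zeros(2, 1, 6, 6))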
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_key': 4, 'd_value': 4, 'd_model': 4,
'd_inner_hid': 4, 'prepostprocess_dropout': 0.5,
'attention_dropout': 0.5, 'relu_dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
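# Copies a (4, 4, 4, 4) view with its middle two axes swapped, i.e.
# permute(0, 2, 1, 3) made contiguous; used for the Q/V head split and for
# un-splitting the attention output.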
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
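# Materializes K with its last two dims transposed so the following bmm
# computes Q @ K^T.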
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
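# Softmax statistics for the attention logits: scales Q @ K^T by 0.5
# (d_model ** -0.5 with d_model = 4), adds the attention bias, then reduces
# the row max (out_ptr0) and the sum of exponentials (out_ptr1).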
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
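# Finishes the softmax in place: exp(scaled_logit + bias - row_max) / row_sum.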
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
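# In-place elementwise residual add: in_out_ptr0 += in_ptr0.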
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
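# Adds the fc1 bias, applies ReLU in place, and stores the (relu <= 0) mask
# used by the backward threshold pass.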
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
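# call() runs the fused encoder-layer forward. primals_1/primals_2 are the
# first LayerNorm weight/bias, primals_3 the input, primals_4..6 the Q/K/V
# projection weights, primals_7 the attention bias, primals_8 the output
# projection, primals_9/10 the second LayerNorm, and primals_11..14 the FFN
# parameters (see EncoderLayerNew.forward below). output[0] (buf35) is the
# layer output; the remaining entries are intermediates kept for backward.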
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf3, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_4[grid(64)](buf8, primals_7, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_add_mul_5[grid(256)](buf11, primals_7,
buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf12 = torch.ops.aten.native_dropout.default(buf11, 0.5, True)
buf13 = buf12[0]
buf14 = buf12[1]
del buf12
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf5, buf15, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
        extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0),
            out=buf16)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf16, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf16
buf18 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf18)
buf19 = torch.ops.aten.native_dropout.default(reinterpret_tensor(
buf18, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
buf20 = buf19[0]
buf21 = buf19[1]
del buf19
buf22 = buf20
del buf20
triton_poi_fused_add_6[grid(64)](buf22, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf23 = buf1
del buf1
buf24 = buf0
del buf0
triton_poi_fused_native_layer_norm_0[grid(16)](buf22, buf23, buf24,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf25 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_native_layer_norm_1[grid(64)](buf22, buf23, buf24,
primals_9, primals_10, buf25, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf23
del buf24
del primals_10
buf26 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
del buf26
buf36 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf27,
primals_12, buf36, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf28 = torch.ops.aten.native_dropout.default(buf27, 0.5, True)
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = reinterpret_tensor(buf27, (16, 4), (4, 1), 0)
del buf27
extern_kernels.addmm(primals_14, reinterpret_tensor(buf29, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf31)
del primals_14
buf32 = torch.ops.aten.native_dropout.default(reinterpret_tensor(
buf31, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
del buf31
buf33 = buf32[0]
buf34 = buf32[1]
del buf32
buf35 = buf33
del buf33
triton_poi_fused_add_6[grid(64)](buf35, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
    return (buf35, primals_3, primals_9,
        reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11, buf14,
        reinterpret_tensor(buf17, (16, 16), (16, 1), 0), buf21, buf22,
        reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf30,
        reinterpret_tensor(buf29, (16, 4), (4, 1), 0), buf34, primals_13,
        buf36, primals_11, primals_8,
        reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0),
        primals_6, primals_5, primals_4)
class Lambda(nn.Module):
    """An easy way to create a PyTorch layer for a simple `func`."""
    def __init__(self, func):
        """Create a layer that simply calls `func` with `x`."""
        super().__init__()
        self.func = func
    def forward(self, x):
        return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
        self.fc1 = torch.nn.Linear(in_features=d_model,
            out_features=d_inner_hid)
        self.fc2 = torch.nn.Linear(in_features=d_inner_hid,
            out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
        self.q_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_key * n_head, bias=False)
        self.k_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_key * n_head, bias=False)
        self.v_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_value * n_head, bias=False)
        self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
            out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
        q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head,
            self.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
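    # Cache protocol implemented above: with static_kv the projected
    # encoder K/V are computed once and reused via cache['static_k'] /
    # cache['static_v']; otherwise each call concatenates the new K/V onto
    # cache['k'] / cache['v'] along dim=2 for incremental decoding.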
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
        out = torch.reshape(out, shape=[out.size(0), out.size(1),
            out.shape[2] * out.shape[3]])
out = self.proj_fc(out)
return out
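# Shape flow through MultiHeadAttention.forward, for reference:
#   queries (B, S, d_model) -> q (B, n_head, S, d_key)
#   product = q @ k.transpose(2, 3) -> (B, n_head, S, S), scaled by
#   d_model ** -0.5 (note: d_model, not d_key, as written above)
#   out = weights @ v -> (B, n_head, S, d_value) -> (B, S, n_head * d_value)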
class LambdaXY(nn.Module):
    """An easy way to create a PyTorch layer for a simple `func`."""
    def __init__(self, func):
        """Create a layer that simply calls `func` with `x` and `y`."""
        super().__init__()
        self.func = func
    def forward(self, x, y):
        return self.func(x, y)
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = nn.ModuleList()
cur_a_len = 0
cur_n_len = 0
cur_d_len = 0
for cmd in self.process_cmd:
if cmd == 'a':
                self.functors.add_module('add_res_connect_{}'.format(
                    cur_a_len), LambdaXY(lambda x, y: x + y
                    if y is not None else x))
cur_a_len += 1
elif cmd == 'n':
layerNorm = torch.nn.LayerNorm(normalized_shape=d_model,
elementwise_affine=True, eps=1e-05)
                self.functors.add_module('layer_norm_%d' % cur_n_len,
                    layerNorm)
cur_n_len += 1
elif cmd == 'd':
                self.functors.add_module('add_drop_{}'.format(cur_d_len),
                    Lambda(lambda x: F.dropout(x, p=dropout_rate)
                    if dropout_rate else x))
cur_d_len += 1
def forward(self, x, residual=None):
        for cmd, functor in zip(self.process_cmd, self.functors):
if cmd == 'a':
x = functor(x, residual)
else:
x = functor(x)
return x
class EncoderLayerNew(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayerNew, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, input_0, input_1):
primals_1 = self.preprocesser1.functors.layer_norm_0.weight
primals_2 = self.preprocesser1.functors.layer_norm_0.bias
primals_4 = self.self_attn.q_fc.weight
primals_5 = self.self_attn.k_fc.weight
primals_6 = self.self_attn.v_fc.weight
primals_8 = self.self_attn.proj_fc.weight
primals_9 = self.preprocesser2.functors.layer_norm_0.weight
primals_10 = self.preprocesser2.functors.layer_norm_0.bias
primals_11 = self.ffn.fc1.weight
primals_12 = self.ffn.fc1.bias
primals_13 = self.ffn.fc2.weight
primals_14 = self.ffn.fc2.bias
primals_3 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
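# A hedged usage sketch (assumes a CUDA device; shapes mirror get_inputs):
#     layer = EncoderLayerNew(n_head=4, d_key=4, d_value=4, d_model=4,
#         d_inner_hid=4, prepostprocess_dropout=0.5, attention_dropout=0.5,
#         relu_dropout=0.5).cuda()
#     out = layer(torch.rand(4, 4, 4, device='cuda'),
#         torch.rand(4, 4, 4, device='cuda'))  # -> (4, 4, 4)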
| eminem171333491/PaddleOCR2Pytorch | EncoderLayer | false | 3,470 | [
"Apache-2.0"
] | 0 | ec466bb3a689eccb9290e9f80812a45301d3b030 | https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Lambda(nn.Module):
    """An easy way to create a PyTorch layer for a simple `func`."""
    def __init__(self, func):
        """Create a layer that simply calls `func` with `x`."""
        super().__init__()
        self.func = func
    def forward(self, x):
        return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super().__init__()
self.dropout_rate = dropout_rate
        self.fc1 = torch.nn.Linear(in_features=d_model,
            out_features=d_inner_hid)
        self.fc2 = torch.nn.Linear(in_features=d_inner_hid,
            out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super().__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
        self.q_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_key * n_head, bias=False)
        self.k_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_key * n_head, bias=False)
        self.v_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_value * n_head, bias=False)
        self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
            out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
        q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head,
            self.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
        out = torch.reshape(out, shape=[out.size(0), out.size(1),
            out.shape[2] * out.shape[3]])
out = self.proj_fc(out)
return out
class LambdaXY(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `fun
# ... truncated (>4000 chars) for memory efficiency |
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/n3/cn3h43fi4m7oq2vwlktxfhxi3dzck4gnc765fyme47rufsuxazkg.py
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# product => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# product => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yw/cyw3ff4nmszw3dpfuipofodyezjcpjoru35h7fhkaosfnlrctm2g.py
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# product_1 => mul_2
# product_2 => add_2
# weights => amax, exp, sub_1, sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.5), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_4 = async_compile.triton('triton_poi_fused__softmax_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp19, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4j/c4js4wnrmajokobx5l4yjjeu36aktrpkep2mzo6qtwttwlqodwbm.py
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# product_1 => mul_2
# product_2 => add_2
# weights => amax, div, exp, sub_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.5), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_5 = async_compile.triton('triton_poi_fused__softmax_add_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zi/czic6s5idip57syewxigjtom43flziklldd4ea2qpsxjorxgbunq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_3
# Graph fragment:
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %primals_3), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u4/cu4mvhweewrefdurxuza5qfbqlwomkc67kmxkkaurh6luaf2e2fz.py
# Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf4, buf7, 64, 4, grid=grid(64, 4), stream=stream0)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [product], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf8, primals_7, buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [product_1, product_2, weights], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_5.run(buf11, primals_7, buf9, buf10, 256, grid=grid(256), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten.native_dropout]
buf12 = torch.ops.aten.native_dropout.default(buf11, 0.5, True)
buf13 = buf12[0]
buf14 = buf12[1]
del buf12
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf5, buf15, 256, grid=grid(256), stream=stream0)
buf16 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
del buf16
buf18 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf18)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_dropout]
buf19 = torch.ops.aten.native_dropout.default(reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
buf20 = buf19[0]
buf21 = buf19[1]
del buf19
buf22 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf22, primals_3, 64, grid=grid(64), stream=stream0)
buf23 = buf1; del buf1 # reuse
buf24 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf22, buf23, buf24, 16, grid=grid(16), stream=stream0)
buf25 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf22, buf23, buf24, primals_9, primals_10, buf25, 64, grid=grid(64), stream=stream0)
del primals_10
buf26 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0); del buf26 # reuse
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_7.run(buf27, primals_12, buf39, 64, grid=grid(64), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [hidden_1, hidden_2], Original ATen: [aten.relu, aten.native_dropout]
buf28 = torch.ops.aten.native_dropout.default(buf27, 0.5, True)
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = reinterpret_tensor(buf27, (16, 4), (4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf31)
del primals_14
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_dropout]
buf32 = torch.ops.aten.native_dropout.default(reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
buf33 = buf32[0]
buf34 = buf32[1]
del buf32
buf35 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf35, buf22, 64, grid=grid(64), stream=stream0)
buf36 = buf24; del buf24 # reuse
buf37 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf35, buf36, buf37, 16, grid=grid(16), stream=stream0)
buf38 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0); del buf31 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf35, buf36, buf37, primals_15, primals_16, buf38, 64, grid=grid(64), stream=stream0)
del buf36
del buf37
del primals_16
return (buf38, primals_3, primals_9, primals_15, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11, buf14, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), buf21, buf22, reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf30, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), buf34, buf35, primals_13, buf39, primals_11, primals_8, reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6, primals_5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
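# Running this file directly benchmarks the compiled module:
# benchmark_compiled_module builds random CUDA tensors with rand_strided and
# times call() via print_performance (times=10, repeat=10 by default).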
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Lambda(nn.Module):
    """An easy way to create a PyTorch layer for a simple `func`."""
    def __init__(self, func):
        """Create a layer that simply calls `func` with `x`."""
        super().__init__()
        self.func = func
    def forward(self, x):
        return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
        self.fc1 = torch.nn.Linear(in_features=d_model,
            out_features=d_inner_hid)
        self.fc2 = torch.nn.Linear(in_features=d_inner_hid,
            out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
        self.q_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_key * n_head, bias=False)
        self.k_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_key * n_head, bias=False)
        self.v_fc = torch.nn.Linear(in_features=d_model,
            out_features=d_value * n_head, bias=False)
        self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
            out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self
.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape
[2] * out.shape[3]])
out = self.proj_fc(out)
return out
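# Note on the scaling above: `product` is multiplied by d_model ** -0.5 rather
# than the more common d_key ** -0.5; with the configuration used in this
# record (d_model == d_key == 4, see get_init_inputs below) the two coincide,
# which is why the generated Triton kernels fold in the constant 0.5.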
class LambdaXY(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x, y):
return self.func(x, y)
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = nn.ModuleList()
cur_a_len = 0
cur_n_len = 0
cur_d_len = 0
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.add_module('add_res_connect_{}'.format(
cur_a_len), LambdaXY(lambda x, y: x + y if y is not
None else x))
cur_a_len += 1
elif cmd == 'n':
layerNorm = torch.nn.LayerNorm(normalized_shape=d_model,
elementwise_affine=True, eps=1e-05)
self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm
)
cur_n_len += 1
elif cmd == 'd':
self.functors.add_module('add_drop_{}'.format(cur_d_len),
Lambda(lambda x: F.dropout(x, p=dropout_rate) if
dropout_rate else x))
cur_d_len += 1
def forward(self, x, residual=None):
        for cmd, functor in zip(self.process_cmd, self.functors):
if cmd == 'a':
x = functor(x, residual)
else:
x = functor(x)
return x
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None,
None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
class Encoder(nn.Module):
"""
    Encoder
"""
def __init__(self, n_layer, n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n', postprocess_cmd='da'):
super(Encoder, self).__init__()
self.encoder_layers = nn.ModuleList()
for i in range(n_layer):
encoderLayer = EncoderLayer(n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd, postprocess_cmd)
self.encoder_layers.add_module('layer_%d' % i, encoderLayer)
self.processer = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
for encoder_layer in self.encoder_layers:
enc_output = encoder_layer(enc_input, attn_bias)
enc_input = enc_output
enc_output = self.processer(enc_output)
return enc_output
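# A minimal usage sketch (hyperparameters and shapes taken from
# get_init_inputs/get_inputs below):
#   enc = Encoder(n_layer=1, n_head=4, d_key=4, d_value=4, d_model=4,
#                 d_inner_hid=4, prepostprocess_dropout=0.5,
#                 attention_dropout=0.5, relu_dropout=0.5)
#   out = enc(torch.rand(4, 4, 4), torch.rand(4, 4, 4))  # -> (4, 4, 4)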
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_layer': 1, 'n_head': 4, 'd_key': 4, 'd_value': 4,
'd_model': 4, 'd_inner_hid': 4, 'prepostprocess_dropout': 0.5,
'attention_dropout': 0.5, 'relu_dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
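# The kernel above computes the reduction half of LayerNorm: for each of the
# 16 rows of length 4 it stores the row mean (out_ptr0) and the inverse
# standard deviation rsqrt(var + 1e-05) (out_ptr1), using the biased variance
# (sum of squared deviations divided by 4).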
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
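# The kernel above applies the affine half of LayerNorm with the statistics
# produced by the previous kernel: out = (x - mean) * rstd * weight + bias,
# broadcasting mean/rstd per row and weight/bias per channel.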
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
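# First pass of the fused softmax over q @ k^T: each row of 4 scores is
# scaled by 0.5 (the folded d_model ** -0.5 for d_model = 4), biased by
# attn_bias, and the row max (out_ptr0) and sum of exp(score - max)
# (out_ptr1) are stored for the normalization pass below.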
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
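# Second pass: the scaled, biased scores are recomputed and normalized in
# place as exp(score - max) / sum, completing the numerically stable softmax.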
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
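# Fused bias-add + ReLU for FFN.fc1. Alongside the activation it emits the
# boolean mask (activation <= 0) consumed by threshold_backward in autograd.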
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf3, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_4[grid(64)](buf8, primals_7, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_add_mul_5[grid(256)](buf11, primals_7,
buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf12 = torch.ops.aten.native_dropout.default(buf11, 0.5, True)
buf13 = buf12[0]
buf14 = buf12[1]
del buf12
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf5, buf15, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16
)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf16, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf16
buf18 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf18)
buf19 = torch.ops.aten.native_dropout.default(reinterpret_tensor(
buf18, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
buf20 = buf19[0]
buf21 = buf19[1]
del buf19
buf22 = buf20
del buf20
triton_poi_fused_add_6[grid(64)](buf22, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf23 = buf1
del buf1
buf24 = buf0
del buf0
triton_poi_fused_native_layer_norm_0[grid(16)](buf22, buf23, buf24,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf25 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_native_layer_norm_1[grid(64)](buf22, buf23, buf24,
primals_9, primals_10, buf25, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_10
buf26 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
del buf26
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf27,
primals_12, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf28 = torch.ops.aten.native_dropout.default(buf27, 0.5, True)
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = reinterpret_tensor(buf27, (16, 4), (4, 1), 0)
del buf27
extern_kernels.addmm(primals_14, reinterpret_tensor(buf29, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf31)
del primals_14
buf32 = torch.ops.aten.native_dropout.default(reinterpret_tensor(
buf31, (4, 4, 4), (16, 4, 1), 0), 0.5, True)
buf33 = buf32[0]
buf34 = buf32[1]
del buf32
buf35 = buf33
del buf33
triton_poi_fused_add_6[grid(64)](buf35, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf36 = buf24
del buf24
buf37 = buf23
del buf23
triton_poi_fused_native_layer_norm_0[grid(16)](buf35, buf36, buf37,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf38 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0)
del buf31
triton_poi_fused_native_layer_norm_1[grid(64)](buf35, buf36, buf37,
primals_15, primals_16, buf38, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf36
del buf37
del primals_16
return (buf38, primals_3, primals_9, primals_15, reinterpret_tensor(
buf2, (16, 4), (4, 1), 0), buf11, buf14, reinterpret_tensor(buf17,
(16, 16), (16, 1), 0), buf21, buf22, reinterpret_tensor(buf25, (16,
4), (4, 1), 0), buf30, reinterpret_tensor(buf29, (16, 4), (4, 1), 0
), buf34, buf35, primals_13, buf39, primals_11, primals_8,
reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6,
primals_5, primals_4)
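# call() returns the final LayerNorm output (buf38) first; the remaining
# tensors are saved inputs, dropout masks and intermediates kept for the
# backward pass, which is why EncoderNew.forward below returns only output[0].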
class Lambda(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = torch.nn.Linear(in_features=d_model, out_features=
d_inner_hid)
self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features=
d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=
d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self
.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape
[2] * out.shape[3]])
out = self.proj_fc(out)
return out
class LambdaXY(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x, y):
return self.func(x, y)
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = nn.ModuleList()
cur_a_len = 0
cur_n_len = 0
cur_d_len = 0
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.add_module('add_res_connect_{}'.format(
cur_a_len), LambdaXY(lambda x, y: x + y if y is not
None else x))
cur_a_len += 1
elif cmd == 'n':
layerNorm = torch.nn.LayerNorm(normalized_shape=d_model,
elementwise_affine=True, eps=1e-05)
self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm
)
cur_n_len += 1
elif cmd == 'd':
self.functors.add_module('add_drop_{}'.format(cur_d_len),
Lambda(lambda x: F.dropout(x, p=dropout_rate) if
dropout_rate else x))
cur_d_len += 1
def forward(self, x, residual=None):
        for cmd, functor in zip(self.process_cmd, self.functors):
if cmd == 'a':
x = functor(x, residual)
else:
x = functor(x)
return x
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
prepostprocess_dropout, attention_dropout, relu_dropout,
preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head,
attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None,
None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
class EncoderNew(nn.Module):
"""
    Encoder
"""
def __init__(self, n_layer, n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderNew, self).__init__()
self.encoder_layers = nn.ModuleList()
for i in range(n_layer):
encoderLayer = EncoderLayer(n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd, postprocess_cmd)
self.encoder_layers.add_module('layer_%d' % i, encoderLayer)
self.processer = PrePostProcessLayer(preprocess_cmd, d_model,
prepostprocess_dropout)
def forward(self, input_0, input_1):
primals_1 = (self.encoder_layers.layer_0.preprocesser1.functors.
layer_norm_0.weight)
primals_2 = (self.encoder_layers.layer_0.preprocesser1.functors.
layer_norm_0.bias)
primals_4 = self.encoder_layers.layer_0.self_attn.q_fc.weight
primals_5 = self.encoder_layers.layer_0.self_attn.k_fc.weight
primals_6 = self.encoder_layers.layer_0.self_attn.v_fc.weight
primals_8 = self.encoder_layers.layer_0.self_attn.proj_fc.weight
primals_9 = (self.encoder_layers.layer_0.preprocesser2.functors.
layer_norm_0.weight)
primals_10 = (self.encoder_layers.layer_0.preprocesser2.functors.
layer_norm_0.bias)
primals_11 = self.encoder_layers.layer_0.ffn.fc1.weight
primals_12 = self.encoder_layers.layer_0.ffn.fc1.bias
primals_13 = self.encoder_layers.layer_0.ffn.fc2.weight
primals_14 = self.encoder_layers.layer_0.ffn.fc2.bias
primals_15 = self.processer.functors.layer_norm_0.weight
primals_16 = self.processer.functors.layer_norm_0.bias
primals_3 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0]
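# Note: EncoderNew.forward reads only layer_0's parameters and routes them
# through the compiled call() above, matching the n_layer=1 configuration the
# graph was traced with; with n_layer > 1 the extra layers would be ignored.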
| eminem171333491/PaddleOCR2Pytorch | Encoder | false | 3,471 | [
"Apache-2.0"
] | 0 | ec466bb3a689eccb9290e9f80812a45301d3b030 | https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Lambda(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super().__init__()
self.dropout_rate = dropout_rate
self.fc1 = torch.nn.Linear(in_features=d_model, out_features=
d_inner_hid)
self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features=
d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super().__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key *
n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=
d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head,
out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self
.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head,
self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head,
self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape
[2] * out.shape[3]])
out = self.proj_fc(out)
return out
class LambdaXY(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `fun
# ... truncated (>4000 chars) for memory efficiency |
ConcatenatedAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/u4/cu4v2b2yxjbswks2kqfbqqdsrrm2zn2tr7weaat3ttr5xrhcyuaf.py
# Topologically Sorted Source Nodes: [cat, relu], Original ATen: [aten.cat, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# cat => cat
# relu => relu
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %expand], 2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%cat,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_cat_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_cat_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = (xindex // 8)
x2 = (xindex // 32)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tl.store(out_ptr0 + (x3), tmp12, xmask)
tl.store(out_ptr1 + (x3), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/u2/cu2awcrwgnppugnatxbrhbzcyzowdgftqgbxsyccqghz4vcqwghu.py
# Topologically Sorted Source Nodes: [mul, attention_weighted_encoding], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attention_weighted_encoding => sum_2
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (1, 8), (8, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [att1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [cat, relu], Original ATen: [aten.cat, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_relu_threshold_backward_0.run(buf0, buf1, buf2, buf8, 128, grid=grid(128), stream=stream0)
del buf0
buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_7, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mul, attention_weighted_encoding], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(primals_3, buf6, buf7, 16, grid=grid(16), stream=stream0)
return (buf7, buf6, primals_3, primals_6, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf6, primals_7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim
import torch.utils.data
from torch import nn
class ConcatenatedAttention(nn.Module):
"""
    ConcatenatedAttention module, which concatenates the encoder and decoder
    attention vectors instead of summing them up.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
@param encoder_dim: feature size of encoded images
@param decoder_dim: size of decoder's RNN
@param attention_dim: size of the attention network
"""
super().__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim * 2, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(encoder_out)
att2 = self.decoder_att(decoder_hidden).unsqueeze(1)
att2_expanded = att2.expand_as(att1)
att = self.full_att(self.relu(torch.cat([att1, att2_expanded], dim=2))
).squeeze(2)
alpha = self.softmax(att)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(
dim=1)
return attention_weighted_encoding, alpha
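# A minimal usage sketch (dimensions taken from get_init_inputs below):
#   attn = ConcatenatedAttention(encoder_dim=4, decoder_dim=4, attention_dim=4)
#   ctx, alpha = attn(torch.rand(4, 4, 4), torch.rand(4, 4))
#   # ctx: (4, 4) attention-weighted encoding, alpha: (4, 4) pixel weights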
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.optim
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_relu_threshold_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = xindex // 8
x2 = xindex // 32
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
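# The kernel above fuses torch.cat([att1, att2_expanded], dim=2) with the
# ReLU: lanes 0-3 of each row of 8 come from att1 (in_ptr0) and lanes 4-7
# from the per-batch broadcast att2 (in_ptr1); the second output is the
# (x <= 0) mask kept for threshold_backward.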
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
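# Together with triton_poi_fused__softmax_2 below, this implements a
# numerically stable softmax over dim=1 in two passes: exp(x - rowmax) here,
# then division by the row sum.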
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
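# Computes the attention-weighted encoding: for each (batch, channel) pair it
# accumulates encoder_out[b, p, c] * alpha[b, p] over the 4 pixels, i.e.
# (encoder_out * alpha.unsqueeze(2)).sum(dim=1) from the module code above.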
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (1, 8), (8, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_cat_relu_threshold_backward_0[grid(128)](buf0,
buf1, buf2, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_7, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_mul_sum_3[grid(16)](primals_3, buf6, buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf7, buf6, primals_3, primals_6, reinterpret_tensor(buf2, (16,
8), (8, 1), 0), buf6, primals_7, buf8
class ConcatenatedAttentionNew(nn.Module):
"""
ConcatenatedAttention module which uses concatenation of encoder and decoder
attention vectors instead of summing them up
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
@param encoder_dim: feature size of encoded images
@param decoder_dim: size of decoder's RNN
@param attention_dim: size of the attention network
"""
super().__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim * 2, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1):
primals_1 = self.encoder_att.weight
primals_2 = self.encoder_att.bias
primals_4 = self.decoder_att.weight
primals_5 = self.decoder_att.bias
primals_7 = self.full_att.weight
primals_8 = self.full_att.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| enesmsahin/ShowAttendTell | ConcatenatedAttention | false | 3,472 | [
"MIT"
] | 0 | ae94b9a61c3b7e6f2302b9fd4477b6a3e14a33fe | https://github.com/enesmsahin/ShowAttendTell/tree/ae94b9a61c3b7e6f2302b9fd4477b6a3e14a33fe | import torch
import torch.optim
import torch.utils.data
from torch import nn
class Model(nn.Module):
"""
    ConcatenatedAttention module, which concatenates the encoder and decoder
    attention vectors instead of summing them up.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
@param encoder_dim: feature size of encoded images
@param decoder_dim: size of decoder's RNN
@param attention_dim: size of the attention network
"""
super().__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim * 2, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(encoder_out)
att2 = self.decoder_att(decoder_hidden).unsqueeze(1)
att2_expanded = att2.expand_as(att1)
att = self.full_att(self.relu(torch.cat([att1, att2_expanded], dim=2))
).squeeze(2)
alpha = self.softmax(att)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(
dim=1)
return attention_weighted_encoding, alpha
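# A minimal usage sketch (positional dims taken from get_init_inputs below):
#   model = Model(4, 4, 4)  # encoder_dim, decoder_dim, attention_dim
#   ctx, alpha = model(torch.rand(4, 4, 4), torch.rand(4, 4))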
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CVAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py
# Topologically Sorted Source Nodes: [p_x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# p_x => relu
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jc/cjcnafqvje2zopehqpdp7l4n7zi6bupsd6and2qs2atwezpostzr.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# std => exp
# z => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %randn), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %addmm_2), kwargs = {})
triton_poi_fused_add_exp_mul_2 = async_compile.triton('triton_poi_fused_add_exp_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oi/coiisnakolayw62fmtog6kzm2jrufp5vhagan3piufdot4ob6slc.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# z_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-2) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (2, 4), (4, 1))
assert_size_stride(primals_8, (2, ), (1, ))
assert_size_stride(primals_9, (2, 4), (4, 1))
assert_size_stride(primals_10, (2, ), (1, ))
assert_size_stride(primals_11, (4, 6), (6, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [p_x], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [p_x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf4, primals_6, 16, grid=grid(16), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_10
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf7 = torch.ops.aten.randn.default([4, 2], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
triton_poi_fused_add_exp_mul_2.run(buf6, buf8, buf5, buf9, 8, grid=grid(8), stream=stream0)
buf10 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf9, primals_2, buf10, 24, grid=grid(24), stream=stream0)
del primals_2
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (6, 4), (1, 6), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [q_x], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf12, primals_12, 16, grid=grid(16), stream=stream0)
del primals_12
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf13)
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [q_x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf14, primals_14, 16, grid=grid(16), stream=stream0)
del primals_14
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_16, buf14, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_16
return (buf15, buf6, buf5, buf9, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, primals_15, primals_13, primals_11, primals_9, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 6), (6, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, sample_size, condition_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size, hidden_size)
def forward(self, x, c):
x = torch.cat([x, c], 1)
p_x = F.relu(self.fc1(x))
p_x = self.fc2(p_x)
p_x = F.relu(self.fc3(p_x))
return p_x
class LatentZ(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.mu = nn.Linear(hidden_size, latent_size)
self.logvar = nn.Linear(hidden_size, latent_size)
def forward(self, p_x):
mu = self.mu(p_x)
logvar = self.logvar(p_x)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return std * eps + mu, logvar, mu
class Decoder(nn.Module):
def __init__(self, latent_size, condition_size, hidden_size_1,
hidden_size_2, sample_size):
super().__init__()
self.fc1 = nn.Linear(latent_size + condition_size, hidden_size_1)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size_1, hidden_size_2)
self.fc4 = nn.Linear(hidden_size_2, sample_size)
def forward(self, z_x, c):
z = torch.cat([z_x, c], 1)
q_x = F.relu(self.fc1(z))
q_x = self.fc2(q_x)
q_x = F.relu(self.fc3(q_x))
q_x = self.fc4(q_x)
return q_x
class CVAE(nn.Module):
def __init__(self, sample_size, condition_size, hidden_encoder_size,
hidden_decoder_1_size, hidden_decoder_2_size, latent_size=2):
super().__init__()
self.sample_size = sample_size
self.condition_size = condition_size
self.hidden_encoder_size = hidden_encoder_size
        self.hidden_decoder_1_size = hidden_decoder_1_size
        self.hidden_decoder_2_size = hidden_decoder_2_size
self.latent_size = latent_size
self.encoder = Encoder(sample_size, condition_size, hidden_encoder_size
)
self.latent_z = LatentZ(hidden_encoder_size, latent_size)
self.decoder = Decoder(latent_size, condition_size,
hidden_decoder_1_size, hidden_decoder_2_size, sample_size)
def forward(self, x, c):
p_x = self.encoder(x, c)
z, logvar, mu = self.latent_z(p_x)
q_z = self.decoder(z, c)
return q_z, logvar, mu, z
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'sample_size': 4, 'condition_size': 4,
'hidden_encoder_size': 4, 'hidden_decoder_1_size': 4,
'hidden_decoder_2_size': 4}]
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
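# The kernel above is the fused torch.cat([x, c], 1) for two (4, 4) inputs:
# each flat index splits into a row x1 and a column x0 in [0, 8); columns 0-3
# read from in_ptr0, columns 4-7 read from in_ptr1 at offset x0 - 4, and
# tl.where selects between the two masked loads.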
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 6, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-2 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (2, 4), (4, 1))
assert_size_stride(primals_8, (2,), (1,))
assert_size_stride(primals_9, (2, 4), (4, 1))
assert_size_stride(primals_10, (2,), (1,))
assert_size_stride(primals_11, (4, 6), (6, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9,
(4, 2), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_10
buf7 = torch.ops.aten.randn.default([4, 2], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused_add_exp_mul_2[grid(8)](buf6, buf8, buf5, buf9, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
triton_poi_fused_cat_3[grid(24)](buf9, primals_2, buf10, 24, XBLOCK
=32, num_warps=1, num_stages=1)
del primals_2
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (6, 4), (1,
6), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_1[grid(16)](buf12, primals_12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_12
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (4, 4), (1,
4), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_relu_1[grid(16)](buf14, primals_14, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_14
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_16, buf14, reinterpret_tensor(
primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_16
return (buf15, buf6, buf5, buf9, buf0, buf2, buf4, buf6, buf8, buf10,
buf12, buf14, primals_15, primals_13, primals_11, primals_9,
primals_7, primals_5)
class Encoder(nn.Module):
def __init__(self, sample_size, condition_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size, hidden_size)
def forward(self, x, c):
x = torch.cat([x, c], 1)
p_x = F.relu(self.fc1(x))
p_x = self.fc2(p_x)
p_x = F.relu(self.fc3(p_x))
return p_x
class LatentZ(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.mu = nn.Linear(hidden_size, latent_size)
self.logvar = nn.Linear(hidden_size, latent_size)
def forward(self, p_x):
mu = self.mu(p_x)
logvar = self.logvar(p_x)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return std * eps + mu, logvar, mu
class Decoder(nn.Module):
def __init__(self, latent_size, condition_size, hidden_size_1,
hidden_size_2, sample_size):
super().__init__()
self.fc1 = nn.Linear(latent_size + condition_size, hidden_size_1)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size_1, hidden_size_2)
self.fc4 = nn.Linear(hidden_size_2, sample_size)
def forward(self, z_x, c):
z = torch.cat([z_x, c], 1)
q_x = F.relu(self.fc1(z))
q_x = self.fc2(q_x)
q_x = F.relu(self.fc3(q_x))
q_x = self.fc4(q_x)
return q_x
class CVAENew(nn.Module):
def __init__(self, sample_size, condition_size, hidden_encoder_size,
hidden_decoder_1_size, hidden_decoder_2_size, latent_size=2):
super().__init__()
self.sample_size = sample_size
self.condition_size = condition_size
self.hidden_encoder_size = hidden_encoder_size
        self.hidden_decoder_1_size = hidden_decoder_1_size
        self.hidden_decoder_2_size = hidden_decoder_2_size
self.latent_size = latent_size
self.encoder = Encoder(sample_size, condition_size, hidden_encoder_size
)
self.latent_z = LatentZ(hidden_encoder_size, latent_size)
self.decoder = Decoder(latent_size, condition_size,
hidden_decoder_1_size, hidden_decoder_2_size, sample_size)
def forward(self, input_0, input_1):
primals_3 = self.encoder.fc1.weight
primals_4 = self.encoder.fc1.bias
primals_1 = self.encoder.fc3.weight
primals_6 = self.encoder.fc3.bias
primals_7 = self.latent_z.mu.weight
primals_8 = self.latent_z.mu.bias
primals_9 = self.latent_z.logvar.weight
primals_10 = self.latent_z.logvar.bias
primals_11 = self.decoder.fc1.weight
primals_12 = self.decoder.fc1.bias
primals_2 = self.decoder.fc3.weight
primals_14 = self.decoder.fc3.bias
primals_5 = self.decoder.fc4.weight
primals_16 = self.decoder.fc4.bias
primals_13 = input_0
primals_15 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0], output[1], output[2], output[3]
| ekrell/learn-planning-space | CVAE | false | 3,473 | [
"MIT"
] | 0 | 730e448bffa4996b2b1ef3a5b00500dc172962ec | https://github.com/ekrell/learn-planning-space/tree/730e448bffa4996b2b1ef3a5b00500dc172962ec | import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, sample_size, condition_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size, hidden_size)
def forward(self, x, c):
x = torch.cat([x, c], 1)
p_x = F.relu(self.fc1(x))
p_x = self.fc2(p_x)
p_x = F.relu(self.fc3(p_x))
return p_x
class LatentZ(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.mu = nn.Linear(hidden_size, latent_size)
self.logvar = nn.Linear(hidden_size, latent_size)
def forward(self, p_x):
mu = self.mu(p_x)
logvar = self.logvar(p_x)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return std * eps + mu, logvar, mu
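# Reparameterization trick, as used above: z = mu + std * eps with eps ~ N(0, I)
# keeps the sample differentiable w.r.t. mu and logvar, because the randomness
# enters only through the parameter-free eps (e.g. logvar = 0 gives
# std = exp(0.5 * 0) = 1, so z ~ N(mu, I)).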
class Decoder(nn.Module):
def __init__(self, latent_size, condition_size, hidden_size_1,
hidden_size_2, sample_size):
super().__init__()
self.fc1 = nn.Linear(latent_size + condition_size, hidden_size_1)
self.fc2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(hidden_size_1, hidden_size_2)
self.fc4 = nn.Linear(hidden_size_2, sample_size)
def forward(self, z_x, c):
z = torch.cat([z_x, c], 1)
q_x = F.relu(self.fc1(z))
q_x = self.fc2(q_x)
q_x = F.relu(self.fc3(q_x))
q_x = self.fc4(q_x)
return q_x
class Model(nn.Module):
def __init__(self, sample_size, condition_size, hidden_encoder_size,
hidden_decoder_1_size, hidden_decoder_2_size, latent_size=2):
super().__init__()
self.sample_size = sample_size
self.condition_size = condition_size
self.hidden_encoder_size = hidden_encoder_size
        self.hidden_decoder_1_size = hidden_decoder_1_size
        self.hidden_decoder_2_size = hidden_decoder_2_size
self.latent_size = latent_size
self.encoder = Encoder(sample_size, condition_size, hidden_encoder_size
)
self.latent_z = LatentZ(hidden_encoder_size, latent_size)
self.decoder = Decoder(latent_size, condition_size,
hidden_decoder_1_size, hidden_decoder_2_size, sample_size)
def forward(self, x, c):
p_x = self.encoder(x, c)
z, logvar, mu = self.latent_z(p_x)
q_z = self.decoder(z, c)
return q_z, logvar, mu, z
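# Training note (not part of this module): the usual CVAE objective pairs a
# reconstruction loss on q_z with the analytic KL divergence to N(0, I),
#   kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())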
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'sample_size': 4, 'condition_size': 4,
'hidden_encoder_size': 4, 'hidden_decoder_1_size': 4,
'hidden_decoder_2_size': 4}]
|
TFSamepaddingLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 2, 1, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 784, grid=grid(784), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
class TFSamepaddingLayer(nn.Module):
"""To align with tf `same` padding.
    Put this before any conv layer that needs padding.
    Assumes the kernel has Height == Width for simplicity.
"""
def __init__(self, ksize, stride):
super(TFSamepaddingLayer, self).__init__()
self.ksize = ksize
self.stride = stride
def forward(self, x):
if x.shape[2] % self.stride == 0:
pad = max(self.ksize - self.stride, 0)
else:
pad = max(self.ksize - x.shape[2] % self.stride, 0)
if pad % 2 == 0:
pad_val = pad // 2
padding = pad_val, pad_val, pad_val, pad_val
else:
pad_val_start = pad // 2
pad_val_end = pad - pad_val_start
padding = pad_val_start, pad_val_end, pad_val_start, pad_val_end
x = F.pad(x, padding, 'constant', 0)
return x
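# Worked example (a minimal sketch using the shapes from get_inputs below):
# ksize=4, stride=1, H=W=4 gives 4 % 1 == 0, so pad = max(4 - 1, 0) = 3; pad is
# odd, so padding = (1, 2, 1, 2) and a (4, 4, 4, 4) input becomes (4, 4, 7, 7),
# matching the constant_pad_nd with pad [1, 2, 1, 2] and the 7x7 output buffer
# in the compiled kernel above.
_pad_demo = TFSamepaddingLayer(ksize=4, stride=1)
assert _pad_demo(torch.rand(4, 4, 4, 4)).shape == (4, 4, 7, 7)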
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ksize': 4, 'stride': 1}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.multiprocessing
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
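# The masked load above reads in_ptr0 at -5 + x0 + 4 * x1 + 16 * x2, i.e. the
# source pixel at (row x1 - 1, col x0 - 1) of each 4x4 input plane; output
# positions where the combined bounds mask tmp10 is False lie in the pad border
# and take the fill value other=0.0.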
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](arg0_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TFSamepaddingLayerNew(nn.Module):
"""To align with tf `same` padding.
    Put this before any conv layer that needs padding.
    Assumes the kernel has Height == Width for simplicity.
"""
def __init__(self, ksize, stride):
super(TFSamepaddingLayerNew, self).__init__()
self.ksize = ksize
self.stride = stride
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| essential2189/Cell-Based-Model | TFSamepaddingLayer | false | 3,474 | [
"MIT"
] | 0 | f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e | https://github.com/essential2189/Cell-Based-Model/tree/f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e | import torch
import torch.utils.data
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""To align with tf `same` padding.
    Put this before any conv layer that needs padding.
    Assumes the kernel has Height == Width for simplicity.
"""
def __init__(self, ksize, stride):
super().__init__()
self.ksize = ksize
self.stride = stride
def forward(self, x):
if x.shape[2] % self.stride == 0:
pad = max(self.ksize - self.stride, 0)
else:
pad = max(self.ksize - x.shape[2] % self.stride, 0)
if pad % 2 == 0:
pad_val = pad // 2
padding = pad_val, pad_val, pad_val, pad_val
else:
pad_val_start = pad // 2
pad_val_end = pad - pad_val_start
padding = pad_val_start, pad_val_end, pad_val_start, pad_val_end
x = F.pad(x, padding, 'constant', 0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1]
|
SmoothL1Loss | # AOT ID: ['1_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/v7/cv7afe46zsb74ityzlsvofo5jna4xm7cze67hsl2svrgouto6vvu.py
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, mul_1, truediv, sub_1, loss, mul_2, sum_1, truediv_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.div, aten.where, aten.sum]
# Source node to ATen node mapping:
# diff => abs_1
# loss => where
# lt => lt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=4] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %abs_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze, 64.000001), kwargs = {})
triton_per_fused_abs_div_lt_mul_sub_sum_where_0 = async_compile.triton('triton_per_fused_abs_div_lt_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_div_lt_mul_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_div_lt_mul_sub_sum_where_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp12 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 0.5
tmp7 = tmp3 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp6
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp13 = tmp11 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 0.015624999755859379
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = reinterpret_tensor(buf0, (1, ), (1, ), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, mul_1, truediv, sub_1, loss, mul_2, sum_1, truediv_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.div, aten.where, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_div_lt_mul_sub_sum_where_0.run(buf1, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta
)
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
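# The piecewise (Huber-style) form above is, for d = |pred - target|:
#   loss(d) = 0.5 * d**2 / beta   if d < beta
#           = d - 0.5 * beta      otherwise
# Quick numeric check (a minimal sketch with beta = 1.0): d = [0.5, 2.0] maps
# to [0.5 * 0.5 * 0.5, 2.0 - 0.5] = [0.125, 1.5].
assert torch.allclose(
    smooth_l1_loss(torch.tensor([0.5, 2.0]), torch.zeros(2), reduction='none'),
    torch.tensor([0.125, 1.5]))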
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
loss = smooth_l1_loss(pred, target, beta, reduction='none')
return torch.sum(loss * weight)[None] / avg_factor
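# Note on the fused kernel's constant: with the weights from get_inputs below
# (torch.rand, so effectively all positive), torch.sum(weight > 0) = 256 and
# avg_factor = 256 / 4 + 1e-06 = 64.000001; the compiled graph therefore bakes
# in 1 / 64.000001 ~= 0.015624999755859379 as a single multiply.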
class SmoothL1Loss(nn.Module):
def __init__(self, beta=1.0, loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.loss_weight = loss_weight
def forward(self, pred, target, weight, *args, **kwargs):
loss_bbox = self.loss_weight * weighted_smoothl1(pred, target,
weight, *args, beta=self.beta, **kwargs)
return loss_bbox
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_div_lt_mul_sub_sum_where_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp12 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 0.5
tmp7 = tmp3 * tmp6
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp6
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp13 = tmp11 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 0.015624999755859379
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = reinterpret_tensor(buf0, (1,), (1,), 0)
del buf0
get_raw_stream(0)
triton_per_fused_abs_div_lt_mul_sub_sum_where_0[grid(1)](buf1,
arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta
)
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
loss = smooth_l1_loss(pred, target, beta, reduction='none')
return torch.sum(loss * weight)[None] / avg_factor
class SmoothL1LossNew(nn.Module):
def __init__(self, beta=1.0, loss_weight=1.0):
super(SmoothL1LossNew, self).__init__()
self.beta = beta
self.loss_weight = loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| es6rc/icevision | SmoothL1Loss | false | 3,475 | [
"Apache-2.0"
] | 0 | bb78dd2e1721c2edb82fb9c1a826fe301541d2a1 | https://github.com/es6rc/icevision/tree/bb78dd2e1721c2edb82fb9c1a826fe301541d2a1 | import torch
import torch.nn.functional as F
import torch.nn as nn
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta
)
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
loss = smooth_l1_loss(pred, target, beta, reduction='none')
return torch.sum(loss * weight)[None] / avg_factor
class Model(nn.Module):
def __init__(self, beta=1.0, loss_weight=1.0):
super().__init__()
self.beta = beta
self.loss_weight = loss_weight
def forward(self, pred, target, weight, *args, **kwargs):
loss_bbox = self.loss_weight * weighted_smoothl1(pred, target,
weight, *args, beta=self.beta, **kwargs)
return loss_bbox
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
CrossEntropyLoss | # AOT ID: ['1_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [raw], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# raw => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
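# The kernel above is the numerically stable first half of log_softmax: for
# each of the 16 spatial positions per batch item it loads the 4 channel values
# (stride 16 apart), reduces them with pairwise maximums, and stores
# x - amax(x, dim=1) so that the exp() in the next kernel cannot overflow.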
# kernel path: runs/run_shard_7/inductor_cache/55/c55jnxqzctcsykbux55atvovnot3atqg2zkgotvahahcn7zcnzea.py
# Topologically Sorted Source Nodes: [raw], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg]
# Source node to ATen node mapping:
# raw => exp, log, mul, neg, sub_1, sum_1, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
triton_poi_fused__log_softmax_mul_neg_sum_1 = async_compile.triton('triton_poi_fused__log_softmax_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_mul_neg_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp20 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
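# Hedged reference for the fusion above (an eager sketch, not generated
# code): given the shifted logits from the previous kernel, finish
# log-softmax via log-sum-exp, then reduce against the soft-label tensor to
# get -(log_softmax * target).sum(dim=1) for each spatial position.
def _soft_label_ce_reference(shifted, target):
    log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    return -(log_probs * target).sum(dim=1)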
# kernel path: runs/run_shard_7/inductor_cache/oq/coqhxax33pmzexfkbulkflkivvax74zyk4i7m4qkmnt5livgwyoc.py
# Topologically Sorted Source Nodes: [raw, mul, sum_1, truediv], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# mul => mul_1
# raw => exp, log, mul, neg, sub_1, sum_1, sum_2
# sum_1 => sum_3
# truediv => div
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg2_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze, 256.0), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_2 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r2), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 0.00390625
tmp7 = tmp5 * tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
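# Hedged reference for the final reduction (assumes the shapes asserted in
# call() below): broadcast the (4, 4, 4) loss map against the (4, 4, 4, 4)
# weights, sum everything, and scale by 0.00390625 == 1 / 256, the
# avg_factor that was specialized to a constant at trace time.
def _weighted_average_reference(loss_map, weight):
    return ((loss_map * weight).sum() / 256.0).reshape(1)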
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [raw], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [raw], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg]
triton_poi_fused__log_softmax_mul_neg_sum_1.run(buf0, arg0_1, buf1, 64, grid=grid(64), stream=stream0)
del arg0_1
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = reinterpret_tensor(buf2, (1, ), (1, ), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [raw, mul, sum_1, truediv], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_2.run(buf3, buf1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf1
return (buf3, )
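# End-to-end hedged check (a sketch, not emitted by Inductor): the compiled
# graph should agree with this eager reference, where arg1_1 plays the
# logits, arg0_1 the soft targets, arg2_1 the weights, and avg_factor is the
# baked-in 256.0 (every random weight is > 0, so sum(weight > 0) == 256).
# Soft-label targets for F.cross_entropy require torch >= 1.10.
def _eager_reference(soft_target, logits, weight):
    import torch.nn.functional as F  # local import; the preamble above lacks F
    raw = F.cross_entropy(logits, soft_target, reduction='none')
    return (torch.sum(raw * weight) / 256.0).reshape(1)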
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
def mask_cross_entropy(pred, target, label):
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target,
        reduction='mean')[None]
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
    bin_label_weights = label_weights.view(-1, 1).expand(
        label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    return F.binary_cross_entropy_with_logits(pred, label.float(),
        weight.float(), reduction='sum')[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
raw = F.cross_entropy(pred, label, reduction='none')
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
class CrossEntropyLoss(nn.Module):
def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0):
super(CrossEntropyLoss, self).__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.loss_weight = loss_weight
if self.use_sigmoid:
self.cls_criterion = weighted_binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = weighted_cross_entropy
def forward(self, cls_score, label, label_weight, *args, **kwargs):
loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
label_weight, *args, **kwargs)
return loss_cls
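def _example_usage():
    # Hedged usage sketch (not from the original repo): with the default
    # flags the module dispatches to weighted_cross_entropy, which accepts
    # soft-label targets (torch >= 1.10) and a per-element weight tensor.
    criterion = CrossEntropyLoss()
    scores = torch.rand(4, 4, 4, 4)
    labels = torch.rand(4, 4, 4, 4)
    weights = torch.rand(4, 4, 4, 4)
    return criterion(scores, labels, weights)  # shape (1,) loss tensor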
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
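    # Note (added comment): the tl.full(...) expressions below are vestigial
    # index/mask computations kept from the codegen template; their results
    # are discarded because the whole 256-element reduction fits in one block.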
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + r2, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 0.00390625
tmp7 = tmp5 * tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg0_1,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = reinterpret_tensor(buf2, (1,), (1,), 0)
del buf2
triton_per_fused__log_softmax_div_mul_neg_sum_2[grid(1)](buf3, buf1,
arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf1
return buf3,
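def _smoke_test_call():
    # Hedged smoke test (an assumption about intended use): call() clears the
    # args list it receives, so pass a fresh list of three contiguous
    # (4, 4, 4, 4) CUDA tensors on every invocation.
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
    buf, = call(args)
    return buf  # (1,) loss tensor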
def mask_cross_entropy(pred, target, label):
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target,
        reduction='mean')[None]
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
    bin_label_weights = label_weights.view(-1, 1).expand(
        label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    return F.binary_cross_entropy_with_logits(pred, label.float(),
        weight.float(), reduction='sum')[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
raw = F.cross_entropy(pred, label, reduction='none')
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
class CrossEntropyLossNew(nn.Module):
def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0):
super(CrossEntropyLossNew, self).__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.loss_weight = loss_weight
if self.use_sigmoid:
self.cls_criterion = weighted_binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = weighted_cross_entropy
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
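def _example_usage_compiled():
    # Hedged usage sketch: the compiled path asserts (4, 4, 4, 4) contiguous
    # CUDA inputs, so the module is exercised like this and returns a (1,)
    # scalar loss tensor.
    module = CrossEntropyLossNew()
    inputs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
    return module(*inputs)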
| es6rc/icevision | CrossEntropyLoss | false | 3,476 | ["Apache-2.0"] | 0 | bb78dd2e1721c2edb82fb9c1a826fe301541d2a1 | https://github.com/es6rc/icevision/tree/bb78dd2e1721c2edb82fb9c1a826fe301541d2a1 | import torch
import torch.nn.functional as F
import torch.nn as nn
def mask_cross_entropy(pred, target, label):
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target,
        reduction='mean')[None]
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
    bin_label_weights = label_weights.view(-1, 1).expand(
        label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    return F.binary_cross_entropy_with_logits(pred, label.float(),
        weight.float(), reduction='sum')[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
raw = F.cross_entropy(pred, label, reduction='none')
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
class Model(nn.Module):
def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0):
super().__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.loss_weight = loss_weight
if self.use_sigmoid:
self.cls_criterion = weighted_binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = weighted_cross_entropy
def forward(self, cls_score, label, label_weight, *args, **kwargs):
loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
label_weight, *args, **kwargs)
return loss_cls
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|